diff --git a/.config/taplo.toml b/.config/taplo.toml index 2c6ccfb2b34440686764c39ed6db1c73ed940f06..7cbc1b075125ad237f16d5d7dd33b0de7089ac38 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -33,3 +33,10 @@ keys = ["build"] [rule.formatting] reorder_arrays = false + +[[rule]] +include = ["Cargo.toml"] +keys = ["workspace.dependencies"] + +[rule.formatting] +reorder_keys = true diff --git a/.forklift/config-gitlab.toml b/.forklift/config-gitlab.toml new file mode 100644 index 0000000000000000000000000000000000000000..ab3b2729a46d4e54dc77df1175d4ebe79eda46d0 --- /dev/null +++ b/.forklift/config-gitlab.toml @@ -0,0 +1,33 @@ +[compression] +type = "zstd" + +[compression.zstd] +compressionLevel = 3 + +[general] +jobNameVariable = "CI_JOB_NAME" +jobsBlackList = [] +logLevel = "warn" +threadsCount = 6 + +[cache] +extraEnv = ["RUNTIME_METADATA_HASH"] + +[metrics] +enabled = true +pushEndpoint = "placeholder" + +[metrics.extraLabels] +environment = "production" +job_name = "$CI_JOB_NAME" +project_name = "$CI_PROJECT_PATH" + +[storage] +type = "s3" + +[storage.s3] +accessKeyId = "placeholder" +bucketName = "placeholder" +concurrency = 10 +endpointUrl = "placeholder" +secretAccessKey = "placeholder" diff --git a/.forklift/config.toml b/.forklift/config.toml index ab3b2729a46d4e54dc77df1175d4ebe79eda46d0..6f8eed8882ea36f39d1f0a519180f92eb905f7be 100644 --- a/.forklift/config.toml +++ b/.forklift/config.toml @@ -23,11 +23,7 @@ job_name = "$CI_JOB_NAME" project_name = "$CI_PROJECT_PATH" [storage] -type = "s3" +type = "gcs" -[storage.s3] -accessKeyId = "placeholder" -bucketName = "placeholder" -concurrency = 10 -endpointUrl = "placeholder" -secretAccessKey = "placeholder" +[storage.gcs] +bucketName = "parity-ci-forklift" diff --git a/.github/actions/set-up-gh/action.yml b/.github/actions/set-up-gh/action.yml new file mode 100644 index 0000000000000000000000000000000000000000..fc16ce0b26334283b83266e381c1811738be87c9 --- /dev/null +++ b/.github/actions/set-up-gh/action.yml @@ -0,0 +1,36 @@ +name: 'install gh' +description: 'Install the gh cli in a debian based distro and switches to the PR branch.' 
+inputs: + pr-number: + description: "Number of the PR" + required: true + GH_TOKEN: + description: "GitHub token" + required: true +outputs: + branch: + description: 'Branch name for the PR' + value: ${{ steps.branch.outputs.branch }} +runs: + using: "composite" + steps: + - name: Instal gh cli + shell: bash + # Here it would get the script from previous step + run: | + (type -p wget >/dev/null || (apt update && apt-get install wget -y)) + mkdir -p -m 755 /etc/apt/keyrings + wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null + chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null + apt update + apt install gh -y + git config --global --add safe.directory '*' + - run: gh pr checkout ${{ inputs.pr-number }} + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.GH_TOKEN }} + - name: Export branch name + shell: bash + run: echo "branch=$(git rev-parse --abbrev-ref HEAD)" >> "$GITHUB_OUTPUT" + id: branch diff --git a/.github/command-screnshot.png b/.github/command-screnshot.png new file mode 100644 index 0000000000000000000000000000000000000000..1451fabca8b975534778e8321facd261e3b803fb Binary files /dev/null and b/.github/command-screnshot.png differ diff --git a/.github/commands-readme.md b/.github/commands-readme.md new file mode 100644 index 0000000000000000000000000000000000000000..20644c048c6035a0f92a1c0ed0e96db0896bb3bd --- /dev/null +++ b/.github/commands-readme.md @@ -0,0 +1,200 @@ +# Running commands + +Command bot has been migrated, it is no longer a comment parser and now it is a GitHub action that works as a [`workflow_dispatch`](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch) event. + +## How to run an action + +To run an action, you need to go to the [_actions tab_](https://github.com/paritytech/polkadot-sdk/actions) and pick the one you desire to run. + +The current available command actions are: + +- [Command FMT](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-fmt.yml) +- [Command Update UI](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-update-ui.yml) +- [Command Sync](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-sync.yml) +- [Command Bench](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench.yml) +- [Command Bench All](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-all.yml) +- [Command Bench Overhead](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-overhead.yml) + +You need to select the action, and click on the dropdown that says: `Run workflow`. It is located in the upper right. + +If this dropdown is not visible, you may not have permission to run the action. Contact IT for help. + +![command screenshot](command-screnshot.png) + +Each command will have the same two required values, but it could have more. + +GitHub's official documentation: [Manually running a workflow](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow) + +### Number of the Pull Request + +The number of the pull request. Required so the action can fetch the correct branch and comment if it fails. 
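You can also trigger the same dispatch from a terminal with the GitHub CLI. This is a sketch, assuming `gh` is authenticated against the repository; the workflow file name comes from the list above and `1234` is a placeholder PR number.

```bash
# Dispatch "Command FMT" against PR 1234 (placeholder) from the default branch.
# Equivalent to picking the workflow in the Actions tab and using "Run workflow".
gh workflow run command-fmt.yml \
  --repo paritytech/polkadot-sdk \
  --ref master \
  -f pr=1234
```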
+ +## Action configurations + +### Bench + +Runs `benchmark pallet` or `benchmark overhead` against your PR and commits back updated weights. + +Posible combinations based on the `benchmark` dropdown. + +- `substrate-pallet`: Pallet Benchmark for Substrate for specific pallet + - Requires `Subcommand` to be `pallet` + - Requires `Runtime` to be `dev` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Target Directory` to be `substrate` +- `polkadot-pallet`: Pallet Benchmark for Polkadot for specific pallet + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Target Directory` to be `polkadot` +- `cumulus-assets`: Pallet Benchmark for Cumulus assets + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `asset-hub-westend` + - `asset-hub-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `assets` + - Requires `Target Directory` to be `cumulus` +- `cumulus-collectives`: Pallet Benchmark for Cumulus collectives + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be `collectives-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `collectives` + - Requires `Target Directory` to be `cumulus` +- `cumulus-coretime`: Pallet Benchmark for Cumulus coretime + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `coretime-rococo` + - `coretime-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `coretime` + - Requires `Target Directory` to be `cumulus` +- `cumulus-bridge-hubs`: Pallet Benchmark for Cumulus bridge-hubs + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `bridge-hub-rococo` + - `bridge-hub-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `bridge-hub` + - Requires `Target Directory` to be `cumulus` +- `cumulus-contracts`: Pallet Benchmark for Cumulus contracts + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one `contracts-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `contracts` + - Requires `Target Directory` to be `cumulus` +- `cumulus-glutton`: Pallet Benchmark for Cumulus glutton + - Requires `Subcommand` to be `pallet` + - Requires `Runtime` to be one of the following: + - `glutton-westend` + - `glutton-westend-dev-1300` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `glutton` + - Requires `Target Directory` to be `cumulus` +- `cumulus-starters`: Pallet Benchmark for Cumulus starters + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `seedling` + - `shell` + - Requires field `Pallet` to have an input that applies to 
`^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `starters` + - Requires `Target Directory` to be `cumulus` +- `cumulus-people`: Pallet Benchmark for Cumulus people + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `people-westend` + - `people-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `people` + - Requires `Target Directory` to be `cumulus` +- `cumulus-testing`: Pallet Benchmark for Cumulus testing + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `penpal` + - `rococo-parachain` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `testing` + - Requires `Target Directory` to be `cumulus` + +### Bench-all + +This is a wrapper to run `bench` for all pallets. + +Posible combinations based on the `benchmark` dropdown. + +- `pallet`: Benchmark for Substrate/Polkadot/Cumulus/Trappist for specific pallet + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` +- `substrate`: Pallet + Overhead + Machine Benchmark for Substrate for all pallets + - Requires `Target Directory` to be `substrate` +- `polkadot`: Pallet + Overhead Benchmark for Polkadot + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires `Target Directory` to be `polkadot` +- `cumulus`: Pallet Benchmark for Cumulus + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - `asset-hub-kusama` + - `asset-hub-polkadot` + - `asset-hub-rococo` + - `asset-hub-westend` + - `bridge-hub-kusama` + - `bridge-hub-polkadot` + - `bridge-hub-rococo` + - `bridge-hub-westend` + - `collectives-polkadot` + - `collectives-westend` + - `coretime-rococo` + - `coretime-westend` + - `contracts-rococo` + - `glutton-kusama` + - `glutton-westend` + - `people-rococo` + - `people-westend` + - Requires `Target Directory` to be `cumulus` + +### Bench-overhead + +Run benchmarks overhead and commit back results to PR. + +Posible combinations based on the `benchmark` dropdown. + +- `default`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires `Target directory` to be `polkadot` +- `substrate`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Target directory` to be `substrate` +- `cumulus`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Runtime` to be one of the following: + - `asset-hub-rococo` + - `asset-hub-westend` + - Requires `Target directory` to be `cumulus` + +## How to modify an action + +If you want to modify an action and test it, you can do by simply pushing your changes and then selecting your branch in the `Use worflow from` option. + +This will use a file from a specified branch. 
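The same applies when dispatching from a terminal: a sketch using the GitHub CLI, assuming `gh` is authenticated; the branch name, PR number and pallet below are placeholders, and the input names match the `workflow_dispatch` inputs of `command-bench.yml`.

```bash
# Run your modified "Command Bench" workflow from your own branch instead of master.
# "my-command-changes", "1234" and "pallet_balances" are placeholder values.
gh workflow run command-bench.yml \
  --repo paritytech/polkadot-sdk \
  --ref my-command-changes \
  -f pr=1234 \
  -f benchmark=substrate-pallet \
  -f subcommand=pallet \
  -f runtime=dev \
  -f pallet=pallet_balances \
  -f target_dir=substrate
```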
diff --git a/.github/review-bot.yml b/.github/review-bot.yml index ed719cefec8bc97c921e11a1751889433f0991ea..adbc480c6ba1a69e8cdd112af4be378849d26315 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -9,6 +9,7 @@ rules: - ^\.gitlab/.* - ^\.config/nextest.toml - ^\.cargo/.* + - ^\.forklift/.* exclude: - ^\.gitlab/pipeline/zombienet.* type: "or" @@ -33,6 +34,7 @@ rules: - ^docker/.* - ^\.github/.* - ^\.gitlab/.* + - ^\.forklift/.* - ^\.config/nextest.toml - ^\.cargo/.* minApprovals: 2 diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py index 1f8f103e4e157a8c1c804a618652741193ca5a00..d5197100ad253ed18b9a4df255faa88598883f91 100644 --- a/.github/scripts/check-workspace.py +++ b/.github/scripts/check-workspace.py @@ -135,8 +135,12 @@ def check_links(all_crates): if dep_name in all_crates: links.append((name, dep_name)) - if not 'path' in deps[dep]: - broken.append((name, dep_name, "crate must be linked via `path`")) + if name == 'polkadot-sdk': + if not 'path' in deps[dep]: + broken.append((name, dep_name, "crate must use path")) + return + elif not 'workspace' in deps[dep] or not deps[dep]['workspace']: + broken.append((name, dep_name, "crate must use workspace inheritance")) return def check_crate(deps): @@ -154,8 +158,6 @@ def check_links(all_crates): check_crate(manifest) - - links.sort() broken.sort() diff --git a/.github/scripts/deny-git-deps.py b/.github/scripts/deny-git-deps.py index 4b831c9347f75bdc3c74c80d3af652c37e7ae459..622fc64c488123a153b438af8516f3fa65133776 100644 --- a/.github/scripts/deny-git-deps.py +++ b/.github/scripts/deny-git-deps.py @@ -19,6 +19,7 @@ KNOWN_BAD_GIT_DEPS = { root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() workspace = Workspace.from_path(root) +errors = [] def check_dep(dep, used_by): if dep.location != DependencyLocation.GIT: @@ -27,14 +28,23 @@ def check_dep(dep, used_by): if used_by in KNOWN_BAD_GIT_DEPS.get(dep.name, []): print(f'๐Ÿคจ Ignoring git dependency {dep.name} in {used_by}') else: - print(f'๐Ÿšซ Found git dependency {dep.name} in {used_by}') - sys.exit(1) + errors.append(f'๐Ÿšซ Found git dependency {dep.name} in {used_by}') # Check the workspace dependencies that can be inherited: for dep in workspace.dependencies: check_dep(dep, "workspace") + if workspace.crates.find_by_name(dep.name): + if dep.location != DependencyLocation.PATH: + errors.append(f'๐Ÿšซ Workspace must use path to link local dependency {dep.name}') + # And the dependencies of each crate: for crate in workspace.crates: for dep in crate.dependencies: check_dep(dep, crate.name) + +if errors: + print('โŒ Found errors:') + for error in errors: + print(error) + sys.exit(1) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index c31dee06ec54a0154efc3ad46ff24c79de4d0d7b..5df03f1044d88a7bcf9369c054235747f9f049b1 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -6,7 +6,7 @@ on: merge_group: env: - IMAGE: docker.io/paritytech/prdoc:v0.0.7 + IMAGE: docker.io/paritytech/prdoc:v0.0.8 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 671673c02c09e0e0b5babceca6819c3ae7f05af7..33da5a8ecd591535eb0d4fe8b63cd8801699e988 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -11,13 +11,6 @@ concurrency: group: ${{ github.workflow 
}}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: set-image: # GitHub Actions allows using 'env' in a container context. diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 04c63f4192b29ca1773d1018698b2abe6a666e1c..47f9e5061b4aec5be38937351ae072bd58f84f37 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -5,6 +5,8 @@ on: types: [opened, synchronize, reopened, ready_for_review] paths: - prdoc/*.prdoc +env: + TOOLCHAIN: nightly-2024-03-01 jobs: check-semver: @@ -19,14 +21,14 @@ jobs: with: cache-on-failure: true + - name: install parity-publish + run: cargo install parity-publish@0.6.0 + - name: Rust compilation prerequisites run: | - rustup default nightly-2024-03-01 - rustup target add wasm32-unknown-unknown --toolchain nightly-2024-03-01 - rustup component add rust-src --toolchain nightly-2024-03-01 - - - name: install parity-publish - run: cargo install parity-publish@0.5.1 + rustup default $TOOLCHAIN + rustup target add wasm32-unknown-unknown --toolchain $TOOLCHAIN + rustup component add rust-src --toolchain $TOOLCHAIN - name: extra git setup run: | @@ -39,7 +41,7 @@ jobs: export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 - if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc --toolchain nightly-2024-03-01 -v; then + if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc -v --toolchain $TOOLCHAIN; then cat <> $GITHUB_OUTPUT fmt: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -40,7 +40,7 @@ jobs: run: cargo +nightly fmt --all -- --check check-dependency-rules: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: check dependency rules @@ -49,17 +49,23 @@ jobs: ../.gitlab/ensure-deps.sh check-rust-feature-propagation: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: fetch deps + run: | + # Pull all dependencies eagerly: + time cargo metadata --format-version=1 --locked > /dev/null - name: run zepter - run: zepter run check + run: | + zepter --version + time zepter run check test-rust-features: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -69,7 +75,7 @@ jobs: run: bash .gitlab/rust-features.sh . 
check-toml-format: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -81,7 +87,7 @@ jobs: echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" check-workspace: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) - name: install python deps @@ -98,7 +104,7 @@ jobs: run: python3 .github/scripts/deny-git-deps.py . check-markdown: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -121,7 +127,7 @@ jobs: markdownlint --config "$CONFIG" --ignore target . check-umbrella: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} diff --git a/.github/workflows/command-bench-all.yml b/.github/workflows/command-bench-all.yml new file mode 100644 index 0000000000000000000000000000000000000000..6aa4f6f7ff003a14b8294ee2324bd1e13011b951 --- /dev/null +++ b/.github/workflows/command-bench-all.yml @@ -0,0 +1,96 @@ +name: Command Bench All + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - pallet + - substrate + - polkadot + - cumulus + pallet: + description: Pallet + required: false + type: string + default: pallet_name + target_dir: + description: Target directory + type: choice + options: + - substrate + - polkadot + - cumulus + runtime: + description: Runtime + type: choice + options: + - rococo + - westend + - asset-hub-kusama + - asset-hub-polkadot + - asset-hub-rococo + - asset-hub-westend + - bridge-hub-kusama + - bridge-hub-polkadot + - bridge-hub-rococo + - bridge-hub-westend + - collectives-polkadot + - collectives-westend + - coretime-rococo + - coretime-westend + - contracts-rococo + - glutton-kusama + - glutton-westend + - people-rococo + - people-westend + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench-all: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-weights + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench all + run: | + "./scripts/bench-all.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench-overhead.yml b/.github/workflows/command-bench-overhead.yml new file mode 100644 index 0000000000000000000000000000000000000000..16cbcefcf26974f169d0cd5a1f946af766f18a21 --- /dev/null +++ b/.github/workflows/command-bench-overhead.yml @@ -0,0 +1,75 @@ +name: Command Bench Overhead + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - default + - substrate + - cumulus + runtime: + description: Runtime + type: choice + options: + - rococo + - westend + - asset-hub-rococo + - asset-hub-westend + target_dir: + description: Target directory + type: choice + options: + - polkadot + - substrate + - cumulus + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench-overhead: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench overhead + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --subcommand "overhead" --runtime "${{ inputs.runtime }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench.yml b/.github/workflows/command-bench.yml new file mode 100644 index 0000000000000000000000000000000000000000..b23b06d1b3c043113c2f226b5e1cd32bf5f4b022 --- /dev/null +++ b/.github/workflows/command-bench.yml @@ -0,0 +1,121 @@ +name: Command Bench + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - substrate-pallet + - polkadot-pallet + - cumulus-assets + - cumulus-collectives + - cumulus-coretime + - cumulus-bridge-hubs + - cumulus-contracts + - cumulus-glutton + - cumulus-starters + - cumulus-people + - cumulus-testing + subcommand: + description: Subcommand + type: choice + required: true + options: + - pallet + - xcm + runtime: + description: Runtime + type: choice + options: + - dev + - rococo + - westend + - asset-hub-westend + - asset-hub-rococo + - collectives-westend + - coretime-rococo + - coretime-westend + - bridge-hub-rococo + - bridge-hub-westend + - contracts-rococo + - glutton-westend + - glutton-westend-dev-1300 + - seedling + - shell + - people-westend + - people-rococo + - penpal + - rococo-parachain + pallet: + description: Pallet + type: string + default: pallet_name + target_dir: + description: Target directory + type: choice + options: + - substrate + - polkadot + - cumulus + runtime_dir: + description: Runtime directory + type: choice + options: + - people + - collectives + - coretime + - bridge-hubs + - contracts + - glutton + - starters + - testing + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" --subcommand "${{ inputs.subcommand }}" --runtime_dir "${{ inputs.runtime_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-fmt.yml b/.github/workflows/command-fmt.yml new file mode 100644 index 0000000000000000000000000000000000000000..c949d0768d7a7d2560dbc154a18082600b8132b5 --- /dev/null +++ b/.github/workflows/command-fmt.yml @@ -0,0 +1,55 @@ +name: Command FMT + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-fmt: + needs: [set-image] + runs-on: ubuntu-latest + timeout-minutes: 20 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run FMT + run: | + # format toml. + # since paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231204 includes taplo-cli + taplo format --config .config/taplo.toml + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml new file mode 100644 index 0000000000000000000000000000000000000000..2825f4a604605fea8465ce0646d664c9ef5d38f5 --- /dev/null +++ b/.github/workflows/command-inform.yml @@ -0,0 +1,21 @@ +name: Inform of new command action + +on: + issue_comment: + types: [created] + +jobs: + comment: + runs-on: ubuntu-latest + if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') + steps: + - name: Inform that the new command exist + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'We are migrating the command bot to be a GitHub Action

Please, see the documentation on how to use it' + }) diff --git a/.github/workflows/command-sync.yml b/.github/workflows/command-sync.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa5bb9eaf912506731537a5c441bc54fbb431157 --- /dev/null +++ b/.github/workflows/command-sync.yml @@ -0,0 +1,68 @@ +name: Command Sync + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + chain: + description: Chain + type: choice + required: true + options: + - westend + - rococo + sync-type: + description: Sync type + type: choice + required: true + options: + - warp + - full + - fast + - fast-unsafe + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-sync: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-warpsync + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run sync + run: | + "./scripts/sync.sh" --chain "${{ inputs.chain }}" --type "${{ inputs.sync-type }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-update-ui.yml b/.github/workflows/command-update-ui.yml new file mode 100644 index 0000000000000000000000000000000000000000..b6b0420e786899134a5693ca6992391a17d1d98f --- /dev/null +++ b/.github/workflows/command-update-ui.yml @@ -0,0 +1,56 @@ +name: Command Update UI + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + rust-version: + description: Version of rust. Example 1.70 + required: false + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-update-ui: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 90 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run update-ui + run: | + "./scripts/update-ui-tests.sh" "${{ inputs.rust-version }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed ❌

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed 🎉🎉

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 9b5b89e34475699ccbcaeca34cd290882ee45a9a..33cf9316920048c65c70376557586d905fd7c9a6 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -20,7 +20,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.5.1 + run: cargo install parity-publish@0.6.0 - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index 9643361d9d318f84d64a212358a825a8e0b5aa20..08c50638267ba3be596b5b563433fccf28c2652b 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.5.1 + run: cargo install parity-publish@0.6.0 - name: parity-publish claim env: diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index f39eb4c1716ebea4fb3207ea1a2ecc8227037448..20492f2d3a9104fd08af663d19aed9a301918e6c 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -31,7 +31,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - binary: [ frame-omni-bencher, chain-spec-builder ] + # Tuples of [package, binary-name] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 @@ -41,21 +42,16 @@ jobs: sudo apt update sudo apt install -y protobuf-compiler - - name: Build ${{ matrix.binary }} binary + - name: Build ${{ matrix.binary[1] }} binary run: | - if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then - cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }} - target/production/${{ matrix.binary }} -h - else - cargo build --locked --profile=production -p ${{ matrix.binary }} - target/production/${{ matrix.binary }} --version - fi + cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} + target/production/${{ matrix.binary[1] }} --version - - name: Upload ${{ matrix.binary }} binary + - name: Upload ${{ matrix.binary[1] }} binary uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binary }} - path: target/production/${{ matrix.binary }} + name: ${{ matrix.binary[1] }} + path: target/production/${{ matrix.binary[1] }} publish-release-draft: diff --git a/.github/workflows/release-clobber-stable.yml b/.github/workflows/release-clobber-stable.yml new file mode 100644 index 0000000000000000000000000000000000000000..643c14daa15b1aa7501e27f272524e0ee010e781 --- /dev/null +++ b/.github/workflows/release-clobber-stable.yml @@ -0,0 +1,70 @@ +name: Clobber Stable + +# This action implements the +# [Clobbering](https://github.com/paritytech/polkadot-sdk/blob/master/docs/RELEASE.md#clobbering) +# process from the release process. 
It pushes a new commit to the `stable` branch with all the +# current content of the `audited` tag. It does not use a merge commit, but rather 'clobbers' the +# branch with a single commit that contains all the changes. It has a naming scheme of `Clobber with +# audited ($COMMIT)`. +# Currently, the script is only triggered manually, but can be easily changed to a schedule. + +on: + workflow_dispatch: + +permissions: + contents: write + +jobs: + clobber-stable: + runs-on: ubuntu-latest + timeout-minutes: 5 + env: + STABLE: stable + UNSTABLE: master + AUDITED: audited + steps: + - name: Checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Prechecks + run: | + # Properly fetch + git fetch --prune --unshallow origin tag $AUDITED + git fetch origin $STABLE + + # Sanity checks + git checkout -q tags/$AUDITED || (echo "Could not find the '$AUDITED' tag." && exit 1) + COMMIT=$(git rev-parse tags/$AUDITED) + #$(git branch --contains $COMMIT | grep -q $UNSTABLE) || (echo "The '$AUDITED' tag is not on the '$UNSTABLE' branch." && exit 1) + + git config --global user.email "admin@parity.io" + git config --global user.name "Parity Release Team" + + - name: Prepare commit + run: | + git checkout --quiet origin/$STABLE + + # Delete all tracked files in the working directory + git ls-files -z | xargs -0 rm -f + + # Find and delete any empty directories + find . -type d -empty -delete + + git add . 1>/dev/null 2>/dev/null + git commit -qm "Delete all files" + + # Grab the files from the commit + git checkout --quiet tags/$AUDITED -- . + + # Stage, commit, and push the working directory which now matches 'audited' 1:1 + git status + COMMIT=$(git rev-parse --short=10 tags/$AUDITED) + git add . 1>/dev/null 2>/dev/null + git commit --allow-empty --amend -qm "Clobber with $AUDITED ($COMMIT)" + + - name: Push stable branch + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git log -3 + git push --verbose origin HEAD:$STABLE diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 95b1846b98e0c47cc6de2c92cadc16adc0cab487..69a4bdbdda9ae87a188a23bceb9185d813424a98 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -6,8 +6,6 @@ env: on: push: - tags: - - "*" branches: - release-v[0-9]+.[0-9]+.[0-9]+* - release-cumulus-v[0-9]+* diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 5fdfabc437fe721339af817f12aea1a58a6c0346..55addf11de06db43b30b336d83b39f282d03c292 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -12,15 +12,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -31,7 +23,7 @@ jobs: # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. 
needs: changes - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: ubuntu-latest outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -40,12 +32,12 @@ jobs: uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT - + test-linux-stable-int: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: @@ -60,13 +52,13 @@ jobs: uses: actions/checkout@v4 - name: script run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored - + # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 293acadc4e6a892fea9aa7fa3686cd821606992a..a413d330615931faab5326da6aee31469ac2903f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,15 +11,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -40,9 +32,9 @@ jobs: quick-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: @@ -55,13 +47,13 @@ jobs: uses: actions/checkout@v4 - name: script run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet - + # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} continue-on-error: true # this rarely triggers in practice @@ -81,12 +73,12 @@ jobs: # - if [[ "$CI_JOB_STATUS" == "failed" ]]; then # printf "The x86_64 syscalls used by the worker binaries have changed. 
Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; # fi - + cargo-check-all-benches: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73a8c52c448f72d12e510d65b2f7ff38469856f0..7f2babc6bd47237032b941a6bf9de4a91b031932 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -120,7 +120,8 @@ default: .forklift-cache: before_script: - mkdir ~/.forklift - - cp .forklift/config.toml ~/.forklift/config.toml + - cp .forklift/config-gitlab.toml ~/.forklift/config.toml + - cat .forklift/config-gitlab.toml > .forklift/config.toml - > if [ "$FORKLIFT_BYPASS" != "true" ]; then echo "FORKLIFT_BYPASS not set"; diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index a7f321505bacf99df202c1469e7a75b4f0b30ba4..6e2b53fae6198501297960de84ecdb1606d3e128 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -149,3 +149,27 @@ zombienet-cumulus-0007-full_node_warp_sync: --local-dir="${LOCAL_DIR}" --concurrency=1 --test="0007-full_node_warp_sync.zndsl" + +zombienet-cumulus-0008-elastic_authoring: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0008-elastic_authoring.zndsl" + +zombienet-cumulus-0009-elastic_pov_recovery: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0009-elastic_pov_recovery.zndsl" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index b158cbe0b5aa3a50d490129dab87c9e7d6769b35..90251082077ce07f739a5b122deaf6023dcfeaa6 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -162,6 +162,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: - .zombienet-polkadot-common variables: FORCED_INFRA_INSTANCE: "spot-iops" + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -170,6 +173,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -199,6 +205,17 @@ zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility: --local-dir="${LOCAL_DIR}/functional" --test="0014-chunk-fetching-network-compatibility.zndsl" +zombienet-polkadot-functional-0015-coretime-shared-core: + extends: + - .zombienet-polkadot-common + before_script: + - !reference 
[.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0015-coretime-shared-core.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index 3b07b6a8ab432fe296fdf29cf7ffdec1a723f2fe..96404a84975dbeff904b9476ae72e14db6dd1b9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -83,7 +83,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.10", + "getrandom", "once_cell", "version_check", ] @@ -95,7 +95,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.10", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -130,7 +130,7 @@ dependencies = [ "hex-literal", "itoa", "proptest", - "rand 0.8.5", + "rand", "ruint", "serde", "tiny-keccak", @@ -294,6 +294,9 @@ name = "arbitrary" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] [[package]] name = "ark-bls12-377" @@ -629,7 +632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", ] [[package]] @@ -639,7 +642,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", "rayon", ] @@ -652,7 +655,7 @@ dependencies = [ "ark-serialize 0.4.2", "ark-std 0.4.0", "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", "sha3", ] @@ -689,8 +692,24 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", + "asn1-rs-derive 0.4.0", + "asn1-rs-impl 0.1.0", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +dependencies = [ + "asn1-rs-derive 0.5.0", + "asn1-rs-impl 0.2.0", "displaydoc", "nom", "num-traits", @@ -708,7 +727,19 @@ dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", + "synstructure 0.13.1", ] [[package]] @@ -722,6 +753,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 
2.0.61", +] + [[package]] name = "assert_cmd" version = "2.0.12" @@ -763,7 +805,6 @@ name = "asset-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-hub-rococo-runtime", "asset-test-utils", "cumulus-pallet-parachain-system", "emulated-integration-tests-common", @@ -777,9 +818,7 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "penpal-runtime", "polkadot-runtime-common", - "rococo-runtime", "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", @@ -820,6 +859,7 @@ dependencies = [ "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-aura", "pallet-authorship", "pallet-balances", @@ -867,7 +907,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -890,7 +930,6 @@ name = "asset-hub-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-hub-westend-runtime", "asset-test-utils", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", @@ -908,16 +947,14 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "penpal-runtime", "polkadot-runtime-common", "sp-core", "sp-keyring", "sp-runtime", "staging-xcm", "staging-xcm-executor", - "westend-runtime", "westend-system-emulated-network", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -953,6 +990,7 @@ dependencies = [ "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-aura", "pallet-authorship", "pallet-balances", @@ -999,7 +1037,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -1071,7 +1109,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] @@ -1081,11 +1119,11 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ - "async-lock", + "async-lock 2.8.0", "async-task", "concurrent-queue", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "slab", ] @@ -1095,10 +1133,10 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -1109,10 +1147,10 @@ checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "blocking", - "futures-lite", + "futures-lite 1.13.0", "once_cell", ] @@ -1122,27 +1160,57 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.23", "slab", "socket2 
0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +dependencies = [ + "async-lock 3.4.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.4.0", + "rustix 0.38.21", + "slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ - "event-listener", + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.2.0", + "event-listener-strategy", + "pin-project-lite 0.2.12", ] [[package]] @@ -1151,10 +1219,10 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" dependencies = [ - "async-io", + "async-io 1.13.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -1163,13 +1231,13 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "autocfg", "blocking", "cfg-if", - "event-listener", - "futures-lite", + "event-listener 2.5.3", + "futures-lite 1.13.0", "rustix 0.37.23", "signal-hook", "windows-sys 0.48.0", @@ -1184,13 +1252,13 @@ dependencies = [ "async-attributes", "async-channel", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -1232,9 +1300,9 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -1266,6 +1334,17 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.9", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -1301,9 +1380,9 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.10", + "getrandom", "instant", - "rand 0.8.5", + "rand", ] [[package]] @@ -1335,8 +1414,8 @@ dependencies = [ "dleq_vrf", "fflonk", "merlin", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand_chacha", + "rand_core", "ring 0.1.0", "sha2 0.10.8", "sp-ark-bls12-381", @@ -1368,6 +1447,12 @@ version 
= "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -1490,9 +1575,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -1598,11 +1683,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.8.0", "async-task", "atomic-waker", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "log", ] @@ -1899,7 +1984,7 @@ dependencies = [ "bp-parachains", "bp-polkadot-core", "bp-runtime", - "ed25519-dalek 2.1.1", + "ed25519-dalek", "finality-grandpa", "parity-scale-codec", "sp-application-crypto", @@ -1972,8 +2057,6 @@ dependencies = [ name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ - "asset-hub-rococo-runtime", - "bridge-hub-rococo-runtime", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", @@ -2098,8 +2181,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2158,10 +2240,10 @@ dependencies = [ name = "bridge-hub-westend-integration-tests" version = "1.0.0" dependencies = [ - "bridge-hub-westend-runtime", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "hex-literal", "pallet-asset-conversion", "pallet-assets", "pallet-balances", @@ -2258,9 +2340,8 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2278,7 +2359,6 @@ dependencies = [ "bp-xcm-bridge-hub-router", "frame-support", "frame-system", - "hash-db", "log", "pallet-balances", "pallet-bridge-grandpa", @@ -2289,8 +2369,6 @@ dependencies = [ "pallet-utility", "parity-scale-codec", "scale-info", - "sp-api", - "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", @@ -2447,6 +2525,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -2588,7 +2672,7 @@ dependencies = [ "multibase", "multihash 0.17.0", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -2601,7 +2685,7 @@ dependencies = [ "multibase", "multihash 0.18.1", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -2765,7 +2849,7 @@ checksum = "a90d114103adbc625300f346d4d09dfb4ab1c4a8df6868435dd903392ecf4354" dependencies = [ "libc", "once_cell", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -2797,8 +2881,6 @@ name = "collectives-westend-integration-tests" version = 
"1.0.0" dependencies = [ "assert_matches", - "asset-hub-westend-runtime", - "collectives-westend-runtime", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", @@ -2817,7 +2899,6 @@ dependencies = [ "staging-xcm", "staging-xcm-executor", "testnet-parachains-constants", - "westend-runtime", "westend-runtime-constants", "westend-system-emulated-network", ] @@ -2896,7 +2977,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2984,7 +3065,7 @@ dependencies = [ "fflonk", "getrandom_or_panic", "merlin", - "rand_chacha 0.3.1", + "rand_chacha", ] [[package]] @@ -3060,7 +3141,7 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" dependencies = [ - "getrandom 0.2.10", + "getrandom", "once_cell", "proc-macro-hack", "tiny-keccak", @@ -3152,7 +3233,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3163,9 +3244,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -3173,9 +3254,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core2" @@ -3201,6 +3282,7 @@ dependencies = [ "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -3249,7 +3331,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3267,6 +3349,7 @@ dependencies = [ "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -3314,7 +3397,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3570,7 +3653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", "zeroize", ] @@ -3582,7 +3665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", + "rand_core", "typenum", ] @@ -3644,7 +3727,7 @@ dependencies = [ "cumulus-test-runtime", "futures", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", 
"polkadot-node-subsystem-test-helpers", @@ -3675,6 +3758,7 @@ dependencies = [ "cumulus-relay-chain-interface", "futures", "parity-scale-codec", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", @@ -3685,6 +3769,7 @@ dependencies = [ "sc-consensus-babe", "sc-consensus-slots", "sc-telemetry", + "sc-utils", "schnellru", "sp-api", "sp-application-crypto", @@ -3699,6 +3784,7 @@ dependencies = [ "sp-state-machine", "sp-timestamp", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -3758,7 +3844,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-consensus", "sp-api", "sp-block-builder", @@ -3783,7 +3869,7 @@ dependencies = [ "futures", "futures-timer", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-parachain-primitives", @@ -3819,7 +3905,6 @@ dependencies = [ "cumulus-test-relay-sproof-builder", "parity-scale-codec", "sc-client-api", - "scale-info", "sp-api", "sp-crypto-hashing", "sp-inherents", @@ -3849,7 +3934,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "portpicker", - "rand 0.8.5", + "rand", "rstest", "sc-cli", "sc-client-api", @@ -3966,7 +4051,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rand 0.8.5", + "rand", "sc-client-api", "scale-info", "sp-consensus-slots", @@ -4246,15 +4331,8 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", - "parking_lot 0.12.1", - "polkadot-availability-recovery", - "polkadot-collator-protocol", "polkadot-core-primitives", "polkadot-network-bridge", - "polkadot-node-collation-generation", - "polkadot-node-core-chain-api", - "polkadot-node-core-prospective-parachains", - "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", "polkadot-overseer", @@ -4291,7 +4369,7 @@ dependencies = [ "parity-scale-codec", "pin-project", "polkadot-overseer", - "rand 0.8.5", + "rand", "sc-client-api", "sc-rpc-api", "sc-service", @@ -4447,8 +4525,7 @@ dependencies = [ "polkadot-service", "polkadot-test-service", "portpicker", - "rand 0.8.5", - "rococo-parachain-runtime", + "rand", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", @@ -4473,7 +4550,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-aura", - "sp-consensus-grandpa", "sp-core", "sp-io", "sp-keyring", @@ -4500,7 +4576,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "socket2 0.5.6", + "socket2 0.5.7", "windows-sys 0.52.0", ] @@ -4522,29 +4598,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.5.0", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle 2.5.0", "zeroize", @@ -4569,7 +4631,7 @@ checksum 
= "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" dependencies = [ "byteorder", "digest 0.9.0", - "rand_core 0.6.4", + "rand_core", "subtle-ng", "zeroize", ] @@ -4682,7 +4744,21 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs 0.6.1", "displaydoc", "nom", "num-bigint", @@ -4709,20 +4785,20 @@ dependencies = [ [[package]] name = "derive-syn-parse" -version = "0.1.5" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", - "syn 1.0.109", + "syn 2.0.61", ] [[package]] -name = "derive-syn-parse" -version = "0.2.0" +name = "derive_arbitrary" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -4890,7 +4966,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "once_cell", "proc-macro2 1.0.82", "quote 1.0.35", @@ -4963,19 +5039,10 @@ dependencies = [ "elliptic-curve", "rfc6979", "serdect", - "signature 2.1.0", + "signature", "spki", ] -[[package]] -name = "ed25519" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature 1.6.4", -] - [[package]] name = "ed25519" version = "2.2.2" @@ -4983,21 +5050,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8", - "signature 2.1.0", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519 1.5.3", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", + "signature", ] [[package]] @@ -5006,9 +5059,9 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.2", - "ed25519 2.2.2", - "rand_core 0.6.4", + "curve25519-dalek", + "ed25519", + "rand_core", "serde", "sha2 0.10.8", "subtle 2.5.0", @@ -5021,11 +5074,11 @@ version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ - "curve25519-dalek 4.1.2", - "ed25519 2.2.2", + "curve25519-dalek", + "ed25519", "hashbrown 0.14.3", "hex", - "rand_core 0.6.4", + 
"rand_core", "sha2 0.10.8", "zeroize", ] @@ -5049,7 +5102,7 @@ dependencies = [ "generic-array 0.14.7", "group", "pkcs8", - "rand_core 0.6.4", + "rand_core", "sec1", "serdect", "subtle 2.5.0", @@ -5178,19 +5231,6 @@ dependencies = [ "regex", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.10.1" @@ -5331,6 +5371,27 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.12", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite 0.2.12", +] + [[package]] name = "exit-future" version = "0.2.0" @@ -5457,7 +5518,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ -5514,8 +5575,8 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", ] @@ -5530,7 +5591,7 @@ dependencies = [ "futures", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "relay-utils", ] @@ -5553,7 +5614,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -5571,7 +5632,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", - "libz-sys", "miniz_oxide", ] @@ -5693,7 +5753,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "rand 0.8.5", + "rand", "rand_pcg", "sc-block-builder", "sc-chain-spec", @@ -5761,7 +5821,7 @@ dependencies = [ "frame-support", "frame-system", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-arithmetic", "sp-core", @@ -5781,7 +5841,7 @@ dependencies = [ "frame-support", "honggfuzz", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -5931,12 +5991,12 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "expander", "frame-support-procedural-tools", "itertools 0.11.0", "macro_magic", - "proc-macro-warning", + "proc-macro-warning 1.0.0", "proc-macro2 1.0.82", "quote 1.0.35", "regex", @@ -6138,6 +6198,16 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +dependencies = [ + "futures-timer", + 
"futures-util", +] + [[package]] name = "futures-channel" version = "0.3.30" @@ -6187,6 +6257,16 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.12", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -6200,13 +6280,12 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki", + "rustls 0.21.7", ] [[package]] @@ -6297,17 +6376,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.10" @@ -6316,7 +6384,7 @@ checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -6325,8 +6393,8 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" dependencies = [ - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", ] [[package]] @@ -6436,9 +6504,9 @@ dependencies = [ "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quanta", - "rand 0.8.5", + "rand", "smallvec", ] @@ -6449,7 +6517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ -6464,7 +6532,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.9", + "indexmap 2.2.3", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", "indexmap 2.2.3", "slab", "tokio", @@ -6672,6 +6759,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -6679,15 +6777,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", + "http 0.2.9", "pin-project-lite 0.2.12", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + 
"bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite 0.2.12", +] [[package]] name = "httparse" @@ -6709,44 +6824,103 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", "pin-project-lite 0.2.12", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", "tower-service", "tracing", "want", ] +[[package]] +name = "hyper" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite 0.2.12", + "smallvec", + "tokio", + "want", +] + [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.29", "log", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "log", + "rustls 0.23.10", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", + "pin-project-lite 0.2.12", + "socket2 0.5.7", + "tokio", + "tower", + "tower-service", + "tracing", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -6793,21 +6967,21 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 2.3.3", "core-foundation", "fnv", "futures", @@ -6817,7 +6991,26 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows 0.34.0", + "windows 0.51.1", +] + +[[package]] +name = "igd-next" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 0.2.9", + "hyper 0.14.29", + "log", + "rand", + "tokio", + "url", + "xmltree", ] [[package]] @@ -6990,7 +7183,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -7034,13 +7227,13 @@ dependencies = [ "curl", "curl-sys", "encoding_rs", - "event-listener", - "futures-lite", - "http", + "event-listener 2.5.3", + "futures-lite 1.13.0", + "http 0.2.9", "log", "mime", "once_cell", - "polling", + "polling 2.8.0", "slab", "sluice", "tracing", @@ -7073,6 +7266,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.26" @@ -7110,9 +7323,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -7126,20 +7339,22 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" dependencies = [ + "base64 0.22.1", "futures-util", - "http", + "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls-native-certs 0.7.0", + "rustls 0.23.10", "rustls-pki-types", - "soketto", + "rustls-platform-verifier", + "soketto 0.8.0", "thiserror", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls 0.26.0", "tokio-util", "tracing", "url", @@ -7147,20 +7362,23 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" dependencies = [ "anyhow", "async-trait", "beef", + "bytes", "futures-timer", "futures-util", - 
"hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "rustc-hash", "serde", "serde_json", @@ -7172,15 +7390,20 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", - "hyper", - "hyper-rustls", + "base64 0.22.1", + "http-body 1.0.0", + "hyper 1.3.1", + "hyper-rustls 0.27.2", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", + "rustls 0.23.10", + "rustls-platform-verifier", "serde", "serde_json", "thiserror", @@ -7192,11 +7415,11 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" +checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro-crate 3.1.0", "proc-macro2 1.0.82", "quote 1.0.35", @@ -7205,20 +7428,24 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" +checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" dependencies = [ + "anyhow", "futures-util", - "http", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", "pin-project", "route-recognizer", "serde", "serde_json", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", "tokio-stream", @@ -7229,12 +7456,12 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" dependencies = [ - "anyhow", "beef", + "http 1.1.0", "serde", "serde_json", "thiserror", @@ -7242,11 +7469,11 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ - "http", + "http 1.1.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -7335,7 +7562,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -7346,7 +7573,7 @@ checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" dependencies = [ "kvdb", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "rocksdb", "smallvec", @@ -7455,14 +7682,15 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.51.4" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f35eae38201a993ece6bdc823292d6abd1bffed1c4d0f4a3517d2bd8e1d917fe" +checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464" dependencies = [ "bytes", + "either", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -7479,18 +7707,21 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", + "libp2p-upnp", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multiaddr", + "multiaddr 0.18.1", "pin-project", + "rw-stream-sink", + "thiserror", ] [[package]] name = "libp2p-allow-block-list" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510daa05efbc25184458db837f6f9a5143888f1caa742426d92e1833ddd38a50" +checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ "libp2p-core", "libp2p-identity", @@ -7500,9 +7731,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa33f1d26ed664c4fe2cca81a08c8e07d4c1c04f2f4ac7655c2dd85467fda0" +checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ "libp2p-core", "libp2p-identity", @@ -7512,9 +7743,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.2" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" +checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" dependencies = [ "either", "fnv", @@ -7523,50 +7754,53 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr", - "multihash 0.17.0", + "multiaddr 0.18.1", + "multihash 0.19.1", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "quick-protobuf", - "rand 0.8.5", + "rand", "rw-stream-sink", "smallvec", "thiserror", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-dns" -version = "0.39.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146ff7034daae62077c415c2376b8057368042df6ab95f5432ad5e88568b1554" +checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" dependencies = [ + "async-trait", "futures", "libp2p-core", + "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "smallvec", - "trust-dns-resolver 0.22.0", + "trust-dns-resolver", ] [[package]] name = "libp2p-identify" -version = "0.42.2" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5455f472243e63b9c497ff320ded0314254a9eb751799a39c283c6f20b793f3c" +checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" dependencies = [ "asynchronous-codec", "either", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", "log", - "lru 0.10.1", + "lru 0.12.3", "quick-protobuf", "quick-protobuf-codec", "smallvec", @@ -7576,27 +7810,27 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ - "bs58 0.4.0", - "ed25519-dalek 2.1.1", - "log", - 
"multiaddr", - "multihash 0.17.0", + "bs58 0.5.0", + "ed25519-dalek", + "hkdf", + "multihash 0.19.1", "quick-protobuf", - "rand 0.8.5", + "rand", "sha2 0.10.8", "thiserror", + "tracing", "zeroize", ] [[package]] name = "libp2p-kad" -version = "0.43.3" +version = "0.44.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d5ef876a2b2323d63c258e63c2f8e36f205fe5a11f0b3095d59635650790ff" +checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" dependencies = [ "arrayvec 0.7.4", "asynchronous-codec", @@ -7611,20 +7845,21 @@ dependencies = [ "libp2p-swarm", "log", "quick-protobuf", - "rand 0.8.5", + "quick-protobuf-codec", + "rand", "sha2 0.10.8", "smallvec", "thiserror", "uint", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-mdns" -version = "0.43.1" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19983e1f949f979a928f2c603de1cf180cc0dc23e4ac93a62651ccb18341460b" +checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" dependencies = [ "data-encoding", "futures", @@ -7633,9 +7868,9 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand 0.8.5", + "rand", "smallvec", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", "trust-dns-proto 0.22.0", "void", @@ -7643,63 +7878,69 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.12.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42ec91e227d7d0dafa4ce88b333cdf5f277253873ab087555c92798db2ddd46" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ + "instant", "libp2p-core", "libp2p-identify", + "libp2p-identity", "libp2p-kad", "libp2p-ping", "libp2p-swarm", + "once_cell", "prometheus-client", ] [[package]] name = "libp2p-noise" -version = "0.42.2" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3673da89d29936bc6435bafc638e2f184180d554ce844db65915113f86ec5e" +checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" dependencies = [ "bytes", - "curve25519-dalek 3.2.0", + "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", "log", + "multiaddr 0.18.1", + "multihash 0.19.1", "once_cell", "quick-protobuf", - "rand 0.8.5", + "rand", "sha2 0.10.8", "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.42.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e57759c19c28a73ef1eb3585ca410cefb72c1a709fcf6de1612a378e4219202" +checksum = "e702d75cd0827dfa15f8fd92d15b9932abe38d10d21f47c50438c71dd1b5dae3" dependencies = [ "either", "futures", "futures-timer", "instant", "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", - "rand 0.8.5", + "rand", "void", ] [[package]] name = "libp2p-quic" -version = "0.7.0-alpha.3" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b26abd81cd2398382a1edfe739b539775be8a90fa6914f39b2ab49571ec735" +checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" dependencies = [ "bytes", "futures", @@ -7709,19 +7950,21 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "log", - "parking_lot 0.12.1", - "quinn-proto", - "rand 0.8.5", - "rustls 0.20.8", + "parking_lot 0.12.3", + "quinn 0.10.2", + "rand", + "ring 0.16.20", + "rustls 0.21.7", + "socket2 0.5.7", "thiserror", 
"tokio", ] [[package]] name = "libp2p-request-response" -version = "0.24.1" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffdb374267d42dc5ed5bc53f6e601d4a64ac5964779c6e40bb9e4f14c1e30d5" +checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" dependencies = [ "async-trait", "futures", @@ -7729,15 +7972,17 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand 0.8.5", + "log", + "rand", "smallvec", + "void", ] [[package]] name = "libp2p-swarm" -version = "0.42.2" +version = "0.43.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903b3d592d7694e56204d211f29d31bc004be99386644ba8731fc3e3ef27b296" +checksum = "580189e0074af847df90e75ef54f3f30059aedda37ea5a1659e8b9fca05c0141" dependencies = [ "either", "fnv", @@ -7748,7 +7993,9 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", - "rand 0.8.5", + "multistream-select", + "once_cell", + "rand", "smallvec", "tokio", "void", @@ -7756,36 +8003,39 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" +checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck 0.4.1", + "proc-macro-warning 0.4.2", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 1.0.109", + "syn 2.0.61", ] [[package]] name = "libp2p-tcp" -version = "0.39.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d33698596d7722d85d3ab0c86c2c322254fce1241e91208e3679b4eb3026cf" +checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", + "libp2p-identity", "log", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" +checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" dependencies = [ "futures", "futures-rustls", @@ -7793,51 +8043,68 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.21.7", + "rustls-webpki 0.101.4", "thiserror", - "webpki", - "x509-parser 0.14.0", + "x509-parser 0.15.1", "yasna", ] +[[package]] +name = "libp2p-upnp" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "log", + "tokio", + "void", +] + [[package]] name = "libp2p-wasm-ext" -version = "0.39.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77dff9d32353a5887adb86c8afc1de1a94d9e8c3bc6df8b2201d7cdf5c848f43" +checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" dependencies = [ "futures", "js-sys", "libp2p-core", - "parity-send-wrapper", + "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] [[package]] name = "libp2p-websocket" -version = "0.41.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111273f7b3d3510524c752e8b7a5314b7f7a1fee7e68161c01a7d72cbb06db9f" +checksum = 
"3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" dependencies = [ "either", "futures", "futures-rustls", "libp2p-core", + "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quicksink", "rw-stream-sink", - "soketto", + "soketto 0.7.1", "url", - "webpki-roots 0.22.6", + "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.43.1" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd21d950662700a385d4c6d68e2f5f54d778e97068cdd718522222ef513bda" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", "libp2p-core", @@ -7874,7 +8141,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.5", + "rand", "serde", "sha2 0.9.9", "typenum", @@ -8004,40 +8271,40 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.5.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f02542ae3a94b4c4ffa37dc56388c923e286afa3bf65452e3984b50b2a2f316" +checksum = "0f46c51c205264b834ceed95c8b195026e700494bc3991aaba3b4ea9e20626d9" dependencies = [ "async-trait", "bs58 0.4.0", "bytes", "cid 0.10.1", - "ed25519-dalek 1.0.1", + "ed25519-dalek", "futures", "futures-timer", "hex-literal", "indexmap 2.2.3", "libc", "mockall 0.12.1", - "multiaddr", + "multiaddr 0.17.1", "multihash 0.17.0", "network-interface", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "prost 0.11.9", + "prost 0.12.6", "prost-build 0.11.9", - "quinn", - "rand 0.8.5", + "quinn 0.9.4", + "rand", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.20.9", "serde", "sha2 0.10.8", "simple-dns", "smallvec", "snow", - "socket2 0.5.6", + "socket2 0.5.7", "static_assertions", "str0m", "thiserror", @@ -8046,13 +8313,13 @@ dependencies = [ "tokio-tungstenite", "tokio-util", "tracing", - "trust-dns-resolver 0.23.2", + "trust-dns-resolver", "uint", - "unsigned-varint", + "unsigned-varint 0.8.0", "url", "webpki", - "x25519-dalek 2.0.0", - "x509-parser 0.15.1", + "x25519-dalek", + "x509-parser 0.16.0", "yasna", "zeroize", ] @@ -8088,18 +8355,18 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" -dependencies = [ - "hashbrown 0.13.2", -] +checksum = "eedb2bdbad7e0634f83989bf596f497b070130daaa398ab22d84c39e266deec5" [[package]] name = "lru" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eedb2bdbad7e0634f83989bf596f497b070130daaa398ab22d84c39e266deec5" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] [[package]] name = "lru-cache" @@ -8150,9 +8417,9 @@ dependencies = [ [[package]] name = "macro_magic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d" +checksum = "cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" dependencies = [ "macro_magic_core", "macro_magic_macros", @@ -8162,12 +8429,12 @@ dependencies = [ [[package]] name = "macro_magic_core" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" +checksum = "1687dc887e42f352865a393acae7cf79d98fab6351cde1f58e9e057da89bf150" dependencies = [ "const-random", - "derive-syn-parse 0.1.5", + "derive-syn-parse", "macro_magic_core_macros", "proc-macro2 1.0.82", "quote 1.0.35", @@ -8176,9 +8443,9 @@ dependencies = [ [[package]] name = "macro_magic_core_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" +checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -8187,9 +8454,9 @@ dependencies = [ [[package]] name = "macro_magic_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" +checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.35", @@ -8324,7 +8591,7 @@ checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", "keccak", - "rand_core 0.6.4", + "rand_core", "zeroize", ] @@ -8335,13 +8602,12 @@ dependencies = [ "async-std", "async-trait", "bp-messages", - "env_logger 0.11.3", "finality-relay", "futures", "hex", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "relay-utils", "sp-arithmetic", ] @@ -8353,7 +8619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69672161530e8aeca1d1400fbf3f1a1747ff60ea604265a4e906c2442df20532" dependencies = [ "futures", - "rand 0.8.5", + "rand", "thrift", ] @@ -8452,7 +8718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -8467,14 +8733,14 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "c2-chacha", - "curve25519-dalek 4.1.2", + "curve25519-dalek", "either", "hashlink", "lioness", "log", - "parking_lot 0.12.1", - "rand 0.8.5", - "rand_chacha 0.3.1", + "parking_lot 0.12.3", + "rand", + "rand_chacha", "rand_distr", "subtle 2.5.0", "thiserror", @@ -8488,7 +8754,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-offchain", @@ -8594,7 +8860,26 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.7.2", + "url", +] + +[[package]] +name = "multiaddr" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash 0.19.1", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.2", "url", ] @@ -8620,10 +8905,10 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.8", "sha3", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8637,10 +8922,10 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.8", "sha3", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8650,27 +8935,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", - "unsigned-varint", -] - -[[package]] -name = "multihash-codetable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d815ecb3c8238d00647f8630ede7060a642c9f704761cd6082cb4028af6935" -dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "core2", - "digest 0.10.7", - "multihash-derive 0.9.0", - "ripemd", - "serde", - "sha1", - "sha2 0.10.8", - "sha3", - "strobe-rs", + "unsigned-varint 0.7.2", ] [[package]] @@ -8684,32 +8949,7 @@ dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "multihash-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" -dependencies = [ - "core2", - "multihash 0.19.1", - "multihash-derive-impl", -] - -[[package]] -name = "multihash-derive-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro-error", - "proc-macro2 1.0.82", - "quote 1.0.35", - "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -8720,16 +8960,16 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" dependencies = [ "bytes", "futures", "log", "pin-project", "smallvec", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8766,7 +9006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" dependencies = [ "clap 3.2.25", - "rand 0.8.5", + "rand", ] [[package]] @@ -8882,7 +9122,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cfg-if", "cfg_aliases", "libc", @@ -8918,7 +9158,7 @@ dependencies = [ "node-primitives", "node-testing", "parity-db", - "rand 0.8.5", + "rand", "sc-basic-authorship", "sc-client-api", "sc-transaction-pool", @@ -9251,7 +9491,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", +] + +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs 0.6.1", ] [[package]] @@ -9284,7 +9533,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -9525,6 +9774,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "impl-trait-for-tuples", "log", 
"pallet-balances", "parity-scale-codec", @@ -9535,6 +9785,23 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-assets-freezer" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-assets", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", +] + [[package]] name = "pallet-atomic-swap" version = "28.0.0" @@ -9656,7 +9923,7 @@ dependencies = [ "frame-election-provider-support", "honggfuzz", "pallet-bags-list", - "rand 0.8.5", + "rand", ] [[package]] @@ -9781,7 +10048,7 @@ dependencies = [ "pallet-beefy-mmr", "pallet-mmr", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-consensus-beefy", @@ -9798,7 +10065,6 @@ dependencies = [ "bp-header-chain", "bp-runtime", "bp-test-utils", - "finality-grandpa", "frame-benchmarking", "frame-support", "frame-system", @@ -9810,13 +10076,13 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std 14.0.0", - "sp-trie", ] [[package]] name = "pallet-bridge-messages" version = "0.7.0" dependencies = [ + "bp-header-chain", "bp-messages", "bp-runtime", "bp-test-utils", @@ -9824,13 +10090,15 @@ dependencies = [ "frame-support", "frame-system", "log", - "num-traits", "pallet-balances", + "pallet-bridge-grandpa", "parity-scale-codec", "scale-info", + "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", + "sp-trie", ] [[package]] @@ -9853,7 +10121,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std 14.0.0", - "sp-trie", ] [[package]] @@ -9931,7 +10198,7 @@ dependencies = [ "pallet-session", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-consensus-aura", "sp-core", @@ -10000,7 +10267,7 @@ dependencies = [ "parity-scale-codec", "paste", "pretty_assertions", - "rand 0.8.5", + "rand", "rand_pcg", "scale-info", "serde", @@ -10217,7 +10484,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "scale-info", "sp-core", "sp-io", @@ -10240,8 +10507,8 @@ dependencies = [ "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", "sp-arithmetic", "sp-core", @@ -10465,6 +10732,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", "sp-std 14.0.0", @@ -10614,7 +10882,7 @@ dependencies = [ "frame-system", "log", "parity-scale-codec", - "rand 0.8.5", + "rand", "rand_distr", "scale-info", "serde", @@ -10848,7 +11116,7 @@ dependencies = [ "honggfuzz", "log", "pallet-nomination-pools", - "rand 0.8.5", + "rand", "sp-io", "sp-runtime", "sp-tracing 16.0.0", @@ -11288,7 +11556,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-core", "sp-io", @@ -11320,7 +11588,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "rand_chacha 0.3.1", + "rand_chacha", "scale-info", "sp-arithmetic", "sp-core", @@ -11346,7 +11614,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "rand_chacha 0.3.1", + "rand_chacha", "scale-info", "serde", "sp-application-crypto", @@ -11399,7 +11667,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "scale-info", "serde", "sp-core", @@ -11698,7 +11966,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", 
"staging-xcm-executor", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -11879,7 +12147,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -11972,8 +12239,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", "serde", "unicode-normalization", ] @@ -11998,8 +12265,8 @@ dependencies = [ "log", "lz4", "memmap2 0.5.10", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "siphasher", "snap", ] @@ -12031,12 +12298,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-send-wrapper" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" - [[package]] name = "parity-util-mem" version = "0.12.0" @@ -12049,7 +12310,7 @@ dependencies = [ "impl-trait-for-tuples", "lru 0.8.1", "parity-util-mem-derive", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "primitive-types", "smallvec", "winapi", @@ -12063,7 +12324,7 @@ checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2 1.0.82", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -12091,9 +12352,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core 0.9.8", @@ -12139,7 +12400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ -12246,7 +12507,7 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12274,9 +12535,7 @@ dependencies = [ "pallet-message-queue", "parachains-common", "parity-scale-codec", - "people-rococo-runtime", "polkadot-runtime-common", - "rococo-runtime", "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", @@ -12347,7 +12606,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12375,12 +12634,10 @@ dependencies = [ "pallet-message-queue", "parachains-common", "parity-scale-codec", - "people-westend-runtime", "polkadot-runtime-common", "sp-runtime", "staging-xcm", "staging-xcm-executor", - "westend-runtime", "westend-runtime-constants", "westend-system-emulated-network", ] @@ -12448,7 +12705,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12640,9 +12897,9 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "schnorrkel 0.11.4", "sp-authority-discovery", "sp-core", @@ -12666,8 
+12923,8 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sp-application-crypto", "sp-authority-discovery", "sp-core", @@ -12695,7 +12952,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", + "rand", "rstest", "sc-network", "schnellru", @@ -12727,7 +12984,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", + "rand", "rstest", "sc-network", "schnellru", @@ -12878,15 +13135,15 @@ dependencies = [ "futures", "futures-timer", "lazy_static", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", "quickcheck", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sc-network", "sc-network-common", "sp-application-crypto", @@ -12912,7 +13169,7 @@ dependencies = [ "futures", "futures-timer", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -12968,7 +13225,7 @@ dependencies = [ "log", "merlin", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -12978,9 +13235,9 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "sc-keystore", "schnellru", "schnorrkel 0.11.4", @@ -13009,7 +13266,7 @@ dependencies = [ "kvdb-memorydb", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-erasure-coding", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -13125,7 +13382,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -13196,6 +13453,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -13255,7 +13513,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-primitives", "procfs", - "rand 0.8.5", + "rand", "rococo-runtime", "rusty-fork", "sc-sysinfo", @@ -13387,7 +13645,7 @@ dependencies = [ "log", "mick-jaeger", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-primitives", "sc-network", @@ -13405,7 +13663,9 @@ dependencies = [ "bs58 0.5.0", "futures", "futures-timer", - "hyper", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "log", "parity-scale-codec", "polkadot-primitives", @@ -13438,8 +13698,8 @@ dependencies = [ "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sc-authority-discovery", "sc-network", "sc-network-types", @@ -13487,7 +13747,7 @@ version = "1.0.0" dependencies = [ "async-trait", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -13550,7 +13810,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", 
+ "parking_lot 0.12.3", "pin-project", "polkadot-erasure-coding", "polkadot-node-jaeger", @@ -13564,7 +13824,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "prioritized-metered-channel", - "rand 0.8.5", + "rand", "sc-client-api", "schnellru", "sp-application-crypto", @@ -13585,7 +13845,7 @@ dependencies = [ "futures", "futures-timer", "orchestra", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -13745,7 +14005,7 @@ name = "polkadot-primitives-test-helpers" version = "1.0.0" dependencies = [ "polkadot-primitives", - "rand 0.8.5", + "rand", "sp-application-crypto", "sp-core", "sp-keyring", @@ -13887,10 +14147,9 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rstest", - "rustc-hex", "sc-keystore", "scale-info", "serde", @@ -14005,6 +14264,7 @@ dependencies = [ "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", @@ -14296,8 +14556,8 @@ dependencies = [ "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-emulator", - "xcm-fee-payment-runtime-api", "xcm-procedural", + "xcm-runtime-apis", "xcm-simulator", ] @@ -14317,6 +14577,9 @@ dependencies = [ "frame-support", "frame-system", "kitchensink-runtime", + "minimal-template-runtime", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", "pallet-assets", "pallet-aura", "pallet-authorship", @@ -14335,10 +14598,12 @@ dependencies = [ "pallet-proxy", "pallet-referenda", "pallet-scheduler", + "pallet-skip-feeless-payment", "pallet-timestamp", "pallet-transaction-payment", "pallet-uniques", "pallet-utility", + "parachain-template-runtime", "parity-scale-codec", "polkadot-sdk", "polkadot-sdk-frame", @@ -14358,6 +14623,7 @@ dependencies = [ "sc-service", "scale-info", "simple-mermaid 0.1.1", + "solochain-template-runtime", "sp-api", "sp-arithmetic", "sp-core", @@ -14366,6 +14632,9 @@ dependencies = [ "sp-keyring", "sp-offchain", "sp-runtime", + "sp-runtime-interface 24.0.0", + "sp-std 14.0.0", + "sp-tracing 16.0.0", "sp-version", "staging-chain-spec-builder", "staging-node-cli", @@ -14436,7 +14705,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", @@ -14534,7 +14803,7 @@ dependencies = [ "tracing-gum", "westend-runtime", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -14558,7 +14827,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand_chacha 0.3.1", + "rand_chacha", "sc-keystore", "sc-network", "sp-application-crypto", @@ -14626,9 +14895,9 @@ dependencies = [ "prometheus", "pyroscope", "pyroscope_pprofrs", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "rand_distr", "sc-keystore", "sc-network", @@ -14647,7 +14916,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", - "strum 0.24.1", + "strum 0.26.2", "substrate-prometheus-endpoint", "tokio", "tracing-gum", @@ -14707,7 +14976,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", - 
"rand 0.8.5", + "rand", "sp-core", "sp-keystore", "substrate-build-script-utils", @@ -14795,7 +15064,7 @@ dependencies = [ "polkadot-runtime-parachains", "polkadot-service", "polkadot-test-runtime", - "rand 0.8.5", + "rand", "sc-authority-discovery", "sc-chain-spec", "sc-cli", @@ -14936,6 +15205,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "polling" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite 0.2.12", + "rustix 0.38.21", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -14971,7 +15254,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -14987,7 +15270,7 @@ dependencies = [ "log", "nix 0.26.2", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "smallvec", "symbolic-demangle", "tempfile", @@ -15152,6 +15435,17 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +[[package]] +name = "proc-macro-warning" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "proc-macro-warning" version = "1.0.0" @@ -15187,7 +15481,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "chrono", "flate2", "hex", @@ -15202,7 +15496,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "chrono", "hex", ] @@ -15217,19 +15511,19 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "thiserror", ] [[package]] name = "prometheus-client" -version = "0.19.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6fa99d535dd930d1249e6c79cb3c2915f9172a540fe2b02a4c8f9ca954721e" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "prometheus-client-derive-encode", ] @@ -15264,11 +15558,11 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.6.0", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax 0.8.2", "rusty-fork", @@ -15288,12 +15582,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ "bytes", - "prost-derive 0.12.4", + "prost-derive 0.12.6", ] [[package]] @@ -15332,7 +15626,7 @@ 
dependencies = [ "once_cell", "petgraph", "prettyplease 0.2.12", - "prost 0.12.4", + "prost 0.12.6", "prost-types 0.12.4", "regex", "syn 2.0.61", @@ -15354,9 +15648,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", @@ -15380,7 +15674,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ - "prost 0.12.4", + "prost 0.12.6", ] [[package]] @@ -15433,7 +15727,7 @@ dependencies = [ "mach2", "once_cell", "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "web-sys", "winapi", ] @@ -15455,15 +15749,26 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1693116345026436eb2f10b677806169c1a1260c1c60eaaffe3fb5a29ae23d8b" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", - "unsigned-varint", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "quick_cache" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5253a3a0d56548d5b0be25414171dc780cc6870727746d05bd2bde352eee96c5" +dependencies = [ + "ahash 0.8.11", + "hashbrown 0.13.2", + "parking_lot 0.12.3", ] [[package]] @@ -15474,7 +15779,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand 0.8.5", + "rand", ] [[package]] @@ -15496,27 +15801,45 @@ checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" dependencies = [ "bytes", "pin-project-lite 0.2.12", - "quinn-proto", - "quinn-udp", + "quinn-proto 0.9.6", + "quinn-udp 0.3.2", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "thiserror", "tokio", "tracing", "webpki", ] +[[package]] +name = "quinn" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite 0.2.12", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", + "rustc-hash", + "rustls 0.21.7", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "quinn-proto" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" +checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" dependencies = [ "bytes", - "rand 0.8.5", + "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "slab", "thiserror", "tinyvec", @@ -15524,6 +15847,23 @@ dependencies = [ "webpki", ] +[[package]] +name = "quinn-proto" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +dependencies = [ + "bytes", + "rand", + "ring 0.16.20", + "rustc-hash", + "rustls 0.21.7", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + [[package]] name = "quinn-udp" version = "0.3.2" @@ -15531,12 +15871,25 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" dependencies = [ "libc", - "quinn-proto", + "quinn-proto 0.9.6", "socket2 0.4.9", "tracing", "windows-sys 0.42.0", ] +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.7", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quote" version = "0.6.13" @@ -15561,19 +15914,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -15581,18 +15921,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -15602,16 +15932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -15620,26 +15941,17 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", -] - -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand 0.8.5", + "getrandom", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "rand_distr" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ - "rand_core 0.5.1", + "num-traits", + "rand", ] [[package]] @@ -15648,7 +15960,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -15657,7 +15969,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ 
-15760,7 +16072,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom", "redox_syscall 0.2.16", "thiserror", ] @@ -15890,23 +16202,22 @@ dependencies = [ "bp-runtime", "finality-relay", "frame-support", - "frame-system", "futures", "jsonrpsee", "log", "num-traits", - "pallet-balances", - "pallet-bridge-messages", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "parity-scale-codec", - "rand 0.8.5", + "quick_cache", + "rand", "relay-utils", "sc-chain-spec", "sc-rpc-api", "sc-transaction-pool-api", "scale-info", + "serde_json", "sp-consensus-grandpa", "sp-core", "sp-rpc", @@ -15935,7 +16246,7 @@ dependencies = [ "jsonpath_lib", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde_json", "sp-runtime", "substrate-prometheus-endpoint", @@ -15971,11 +16282,11 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.29", + "hyper-rustls 0.24.2", "ipnet", "js-sys", "log", @@ -15983,7 +16294,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.12", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-pemfile 1.0.3", "serde", "serde_json", @@ -16057,22 +16368,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "getrandom 0.2.10", + "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.48.0", ] -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -16271,7 +16573,7 @@ dependencies = [ "substrate-wasm-builder", "tiny-keccak", "tokio", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -16402,7 +16704,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "proptest", - "rand 0.8.5", + "rand", "rlp", "ruint-macro", "serde", @@ -16504,7 +16806,7 @@ version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.10", @@ -16513,11 +16815,10 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ - "log", "ring 0.16.20", "sct", "webpki", @@ -16525,9 +16826,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring 0.16.20", @@ -16537,14 +16838,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.23.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", + "once_cell", "ring 0.17.7", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.4", "subtle 2.5.0", "zeroize", ] @@ -16595,9 +16897,36 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.2.0" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + +[[package]] +name = "rustls-platform-verifier" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.10", + "rustls-native-certs 0.7.0", + "rustls-platform-verifier-android", + "rustls-webpki 0.102.4", + "security-framework", + "security-framework-sys", + "webpki-roots 0.26.3", + "winapi", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a716eb65e3158e90e17cd93d855216e27bde02745ab842f2cab4a39dba1bacf" +checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" @@ -16611,9 +16940,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.7", "rustls-pki-types", @@ -16651,9 +16980,9 @@ dependencies = [ [[package]] name = "rw-stream-sink" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" dependencies = [ "futures", "pin-project", @@ -16714,13 +17043,12 @@ dependencies = [ "libp2p", "linked_hash_set", "log", - "multihash 0.17.0", - "multihash-codetable", + "multihash 0.19.1", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "quickcheck", - "rand 0.8.5", + "rand", "sc-client-api", "sc-network", "sc-network-types", @@ -16744,7 +17072,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -16835,7 +17163,7 @@ dependencies = [ "names", "parity-bip39", "parity-scale-codec", - "rand 0.8.5", + "rand", "regex", "rpassword", "sc-client-api", @@ -16870,7 +17198,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -16906,9 +17234,9 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quickcheck", - "rand 0.8.5", + "rand", "sc-client-api", "sc-state-db", "schnellru", @@ -16930,10 +17258,9 @@ version = "0.33.0" dependencies = [ "async-trait", "futures", - "futures-timer", "log", "mockall 0.11.4", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-network-types", 
"sc-utils", @@ -16957,7 +17284,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -16999,7 +17326,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17068,7 +17395,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17109,7 +17436,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-consensus-beefy", "sc-rpc", "serde", @@ -17150,8 +17477,8 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -17256,7 +17583,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-consensus", "sp-api", @@ -17304,7 +17631,7 @@ dependencies = [ "env_logger 0.11.3", "num_cpus", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "regex", "sc-executor-common", @@ -17366,7 +17693,7 @@ dependencies = [ "libc", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "rustix 0.36.15", "sc-allocator", @@ -17401,7 +17728,7 @@ name = "sc-keystore" version = "25.0.0" dependencies = [ "array-bytes", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde_json", "sp-application-crypto", "sp-core", @@ -17422,9 +17749,9 @@ dependencies = [ "futures-timer", "log", "mixnet", - "multiaddr", + "multiaddr 0.18.1", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-network", "sc-network-types", @@ -17462,12 +17789,12 @@ dependencies = [ "multistream-select", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "partial_sort", "pin-project", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", - "rand 0.8.5", + "rand", "sc-block-builder", "sc-client-api", "sc-network-common", @@ -17496,7 +17823,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", "wasm-timer", "zeroize", @@ -17528,7 +17855,6 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p", "log", "parity-scale-codec", "quickcheck", @@ -17553,7 +17879,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "sc-client-api", "sc-network", @@ -17571,7 +17897,6 @@ dependencies = [ "array-bytes", "async-channel", "futures", - "libp2p", "log", "parity-scale-codec", "sc-network", @@ -17598,7 +17923,7 @@ dependencies = [ "log", "mockall 0.11.4", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "quickcheck", "sc-block-builder", @@ -17634,8 +17959,8 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17662,7 +17987,6 @@ version = "0.33.0" dependencies = [ "array-bytes", "futures", - "libp2p", "log", "parity-scale-codec", "sc-network", @@ -17680,13 +18004,14 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58 0.5.0", - "ed25519-dalek 2.1.1", + "ed25519-dalek", "libp2p-identity", "litep2p", - 
"multiaddr", - "multihash 0.17.0", + "log", + "multiaddr 0.18.1", + "multihash 0.19.1", "quickcheck", - "rand 0.8.5", + "rand", "thiserror", "zeroize", ] @@ -17701,16 +18026,15 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper", - "hyper-rustls", + "hyper 0.14.29", + "hyper-rustls 0.24.2", "lazy_static", - "libp2p", "log", "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -17752,7 +18076,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", "sc-block-builder", "sc-chain-spec", @@ -17810,11 +18134,13 @@ dependencies = [ "forwarded-header-value", "futures", "governor", - "http", - "hyper", + "http 1.1.0", + "http-body-util", + "hyper 1.3.1", "ip_network", "jsonrpsee", "log", + "serde", "serde_json", "substrate-prometheus-endpoint", "tokio", @@ -17834,9 +18160,9 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", - "rand 0.8.5", + "rand", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -17889,9 +18215,9 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "sc-chain-spec", "sc-client-api", "sc-client-db", @@ -17952,7 +18278,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -17984,7 +18310,7 @@ version = "0.30.0" dependencies = [ "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sp-core", ] @@ -17995,7 +18321,7 @@ dependencies = [ "env_logger 0.11.3", "log", "parity-db", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-keystore", "sp-api", @@ -18046,7 +18372,7 @@ dependencies = [ "futures", "libc", "log", - "rand 0.8.5", + "rand", "rand_pcg", "regex", "sc-telemetry", @@ -18067,9 +18393,9 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "sc-network", "sc-utils", "serde", @@ -18090,7 +18416,7 @@ dependencies = [ "libc", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "rustc-hash", "sc-client-api", @@ -18131,7 +18457,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", @@ -18177,7 +18503,7 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "prometheus", "sp-arithmetic", "tokio-test", @@ -18195,9 +18521,9 @@ dependencies = [ [[package]] name = "scale-decode" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12ebca36cec2a3f983c46295b282b35e5f8496346fb859a8776dad5389e5389" +checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" dependencies = [ "derive_more", "parity-scale-codec", @@ -18292,7 +18618,7 @@ dependencies = [ "arrayvec 0.7.4", "curve25519-dalek-ng", "merlin", - "rand_core 0.6.4", + "rand_core", "sha2 0.9.9", "subtle-ng", "zeroize", @@ -18307,10 +18633,10 @@ dependencies = [ "aead", "arrayref", "arrayvec 0.7.4", - "curve25519-dalek 4.1.2", + "curve25519-dalek", "getrandom_or_panic", "merlin", - "rand_core 0.6.4", + "rand_core", 
"serde_bytes", "sha2 0.10.8", "subtle 2.5.0", @@ -18355,7 +18681,7 @@ dependencies = [ "crc", "fxhash", "log", - "rand 0.8.5", + "rand", "slab", "thiserror", ] @@ -18413,22 +18739,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", + "num-bigint", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -18520,6 +18847,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "separator" version = "0.4.1" @@ -18650,7 +18983,7 @@ dependencies = [ "futures", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serial_test_derive", ] @@ -18815,12 +19148,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" - [[package]] name = "signature" version = "2.1.0" @@ -18828,7 +19155,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -18850,7 +19177,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", ] [[package]] @@ -18931,12 +19258,12 @@ dependencies = [ "async-channel", "async-executor", "async-fs", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-net", "async-process", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -18955,7 +19282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0bb30cf57b7b5f6109ce17c3164445e2d6f270af2cb48f6e4d31c2967c9a9f5" dependencies = [ "arrayvec 0.7.4", - "async-lock", + "async-lock 2.8.0", "atomic-take", "base64 0.21.2", "bip39", @@ -18966,9 +19293,9 @@ dependencies = [ "derive_more", "ed25519-zebra", "either", - "event-listener", + "event-listener 2.5.3", "fnv", - "futures-lite", + "futures-lite 1.13.0", "futures-util", "hashbrown 0.14.3", "hex", @@ -18984,8 +19311,8 @@ dependencies = [ "pbkdf2", "pin-project", "poly1305", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "ruzstd", "schnorrkel 0.10.2", "serde", @@ -18995,10 +19322,10 @@ dependencies = [ "siphasher", "slab", "smallvec", - "soketto", + "soketto 0.7.1", "twox-hash", "wasmi 0.31.2", - "x25519-dalek 2.0.0", + "x25519-dalek", "zeroize", ] @@ -19009,15 +19336,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"256b5bad1d6b49045e95fe87492ce73d5af81545d8b4d8318a872d2007024c33" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.8.0", "base64 0.21.2", "blake2-rfc", "derive_more", "either", - "event-listener", + "event-listener 2.5.3", "fnv", "futures-channel", - "futures-lite", + "futures-lite 1.13.0", "futures-util", "hashbrown 0.14.3", "hex", @@ -19025,10 +19352,10 @@ dependencies = [ "log", "lru 0.11.0", "no-std-net", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "serde", "serde_json", "siphasher", @@ -19053,8 +19380,8 @@ dependencies = [ "aes-gcm", "blake2 0.10.6", "chacha20poly1305", - "curve25519-dalek 4.1.2", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.8", @@ -19126,7 +19453,7 @@ dependencies = [ "hex-literal", "parity-bytes", "parity-scale-codec", - "rand 0.8.5", + "rand", "rlp", "scale-info", "serde", @@ -19147,7 +19474,7 @@ dependencies = [ "hex", "lazy_static", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "snowbridge-amcl", "zeroize", @@ -19191,7 +19518,7 @@ dependencies = [ "log", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "serde_json", @@ -19315,7 +19642,6 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "rustc-hex", "scale-info", "snowbridge-core", "sp-core", @@ -19394,9 +19720,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -19410,15 +19736,29 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "flate2", "futures", - "http", "httparse", "log", - "rand 0.8.5", + "rand", "sha-1 0.9.8", ] +[[package]] +name = "soketto" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "http 1.1.0", + "httparse", + "log", + "rand", + "sha1", +] + [[package]] name = "solochain-template-node" version = "0.0.0" @@ -19492,7 +19832,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -19503,6 +19842,7 @@ dependencies = [ name = "sp-api" version = "26.0.0" dependencies = [ + "docify", "hash-db", "log", "parity-scale-codec", @@ -19591,7 +19931,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "primitive-types", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-crypto-hashing", @@ -19655,10 +19995,11 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "schnellru", "sp-api", "sp-consensus", + "sp-core", "sp-database", "sp-runtime", "sp-state-machine", @@ -19808,10 +20149,10 @@ dependencies = [ "merlin", "parity-bip39", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "primitive-types", - "rand 0.8.5", + "rand", "regex", "scale-info", "schnorrkel 0.11.4", @@ -19925,7 +20266,7 @@ name = "sp-database" version = "10.0.0" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -19996,7 +20337,8 @@ name = 
"sp-io" version = "30.0.0" dependencies = [ "bytes", - "ed25519-dalek 2.1.1", + "docify", + "ed25519-dalek", "libsecp256k1", "log", "parity-scale-codec", @@ -20030,9 +20372,9 @@ name = "sp-keystore" version = "0.34.0" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", - "rand_chacha 0.3.1", + "parking_lot 0.12.3", + "rand", + "rand_chacha", "sp-core", "sp-externalities 0.25.0", ] @@ -20086,7 +20428,7 @@ name = "sp-npos-elections" version = "26.0.0" dependencies = [ "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-arithmetic", @@ -20101,7 +20443,7 @@ version = "2.0.0-alpha.5" dependencies = [ "clap 4.5.3", "honggfuzz", - "rand 0.8.5", + "rand", "sp-npos-elections", "sp-runtime", ] @@ -20146,7 +20488,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "paste", - "rand 0.8.5", + "rand", "scale-info", "serde", "serde_json", @@ -20161,6 +20503,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-weights", "substrate-test-runtime-client", + "tracing", "zstd 0.12.4", ] @@ -20297,14 +20640,15 @@ dependencies = [ name = "sp-state-machine" version = "0.35.0" dependencies = [ + "arbitrary", "array-bytes", "assert_matches", "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", - "rand 0.8.5", + "rand", "smallvec", "sp-core", "sp-externalities 0.25.0", @@ -20321,11 +20665,11 @@ name = "sp-statement-store" version = "10.0.0" dependencies = [ "aes-gcm", - "curve25519-dalek 4.1.2", - "ed25519-dalek 2.1.1", + "curve25519-dalek", + "ed25519-dalek", "hkdf", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sha2 0.10.8", "sp-api", @@ -20336,7 +20680,7 @@ dependencies = [ "sp-runtime", "sp-runtime-interface 24.0.0", "thiserror", - "x25519-dalek 2.0.0", + "x25519-dalek", ] [[package]] @@ -20450,8 +20794,8 @@ dependencies = [ "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", "schnellru", "sp-core", @@ -20638,13 +20982,13 @@ dependencies = [ "parity-scale-codec", "platforms", "polkadot-sdk", - "rand 0.8.5", + "rand", "regex", "sc-service-test", "scale-info", "serde", "serde_json", - "soketto", + "soketto 0.7.1", "staging-node-inspect", "substrate-cli-test-utils", "tempfile", @@ -20747,7 +21091,6 @@ dependencies = [ "frame-benchmarking", "frame-support", "impl-trait-for-tuples", - "log", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -20757,6 +21100,7 @@ dependencies = [ "sp-std 14.0.0", "sp-weights", "staging-xcm", + "tracing", ] [[package]] @@ -20824,19 +21168,6 @@ dependencies = [ "serde", ] -[[package]] -name = "strobe-rs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabb238a1cccccfa4c4fb703670c0d157e1256c1ba695abf1b93bd2bb14bab2d" -dependencies = [ - "bitflags 1.3.2", - "byteorder", - "keccak", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "strsim" version = "0.8.0" @@ -21029,7 +21360,9 @@ dependencies = [ name = "substrate-prometheus-endpoint" version = "0.17.0" dependencies = [ - "hyper", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "log", "prometheus", "thiserror", @@ -21049,9 +21382,7 @@ dependencies = [ "bp-polkadot-core", "bp-relayers", "bp-runtime", - "bridge-runtime-common", "equivocation-detector", - "finality-grandpa", "finality-relay", "frame-support", "frame-system", @@ -21071,9 +21402,11 @@ dependencies = [ "rbtag", "relay-substrate-client", "relay-utils", + "scale-info", "sp-consensus-grandpa", "sp-core", 
"sp-runtime", + "sp-trie", "structopt", "strum 0.26.2", "thiserror", @@ -21147,7 +21480,6 @@ dependencies = [ "frame-system", "frame-system-rpc-runtime-api", "futures", - "hex-literal", "log", "pallet-babe", "pallet-balances", @@ -21185,6 +21517,7 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "substrate-wasm-builder", + "tracing", "trie-db", ] @@ -21211,7 +21544,7 @@ version = "2.0.0" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-transaction-pool", "sc-transaction-pool-api", "sp-blockchain", @@ -21423,6 +21756,17 @@ dependencies = [ "unicode-xid 0.2.4", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "sysinfo" version = "0.30.5" @@ -21863,10 +22207,10 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -21889,7 +22233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", - "rand 0.8.5", + "rand", "tokio", ] @@ -21899,17 +22243,17 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.6", + "rustls 0.21.7", "tokio", ] [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.2", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -21947,7 +22291,7 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -22032,6 +22376,7 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite 0.2.12", + "tokio", "tower-layer", "tower-service", "tracing", @@ -22039,17 +22384,15 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite 0.2.12", "tower-layer", "tower-service", @@ -22196,7 +22539,7 @@ dependencies = [ "matchers 0.1.0", "nu-ansi-term", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "sharded-slab", "smallvec", @@ -22269,7 +22612,7 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "rand 0.8.5", + "rand", "smallvec", "socket2 0.4.9", "thiserror", @@ -22295,7 +22638,7 @@ dependencies = [ "idna 0.4.0", "ipnet", "once_cell", - "rand 0.8.5", + "rand", "smallvec", "thiserror", "tinyvec", @@ 
-22304,26 +22647,6 @@ dependencies = [ "url", ] -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto 0.22.0", -] - [[package]] name = "trust-dns-resolver" version = "0.23.2" @@ -22335,8 +22658,8 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "resolv-conf", "smallvec", "thiserror", @@ -22382,11 +22705,11 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.9", "httparse", "log", - "rand 0.8.5", - "rustls 0.21.6", + "rand", + "rustls 0.21.7", "sha1", "thiserror", "url", @@ -22407,7 +22730,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand", "static_assertions", ] @@ -22512,6 +22835,15 @@ dependencies = [ "bytes", "futures-io", "futures-util", +] + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +dependencies = [ + "bytes", "tokio-util", ] @@ -22637,9 +22969,9 @@ dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "sha2 0.10.8", "sha3", "thiserror", @@ -22680,12 +23012,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -23122,7 +23448,7 @@ dependencies = [ "memfd", "memoffset 0.8.0", "paste", - "rand 0.8.5", + "rand", "rustix 0.36.15", "wasmtime-asm-macros", "wasmtime-environ", @@ -23185,18 +23511,18 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "westend-emulated-chain" @@ -23215,7 +23541,7 @@ dependencies = [ "staging-xcm", "westend-runtime", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -23292,7 +23618,6 @@ dependencies = [ "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rustc-hex", "scale-info", "serde", "serde_derive", @@ -23328,7 +23653,7 @@ dependencies = [ "tiny-keccak", "tokio", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -23419,23 +23744,20 @@ checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", + "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.48.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -23445,10 +23767,19 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", "windows-targets 0.52.0", ] +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -23563,12 +23894,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" -[[package]] -name = "windows_aarch64_msvc" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -23587,12 +23912,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" -[[package]] -name = "windows_i686_gnu" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -23611,12 +23930,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" -[[package]] -name = "windows_i686_msvc" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -23635,12 +23948,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" -[[package]] -name = "windows_x86_64_gnu" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -23677,12 +23984,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" -[[package]] -name = "windows_x86_64_msvc" 
-version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -23729,42 +24030,30 @@ dependencies = [ "tap", ] -[[package]] -name = "x25519-dalek" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" -dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", - "zeroize", -] - [[package]] name = "x25519-dalek" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.1.2", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "serde", "zeroize", ] [[package]] name = "x509-parser" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs", - "base64 0.13.1", + "asn1-rs 0.5.2", "data-encoding", - "der-parser", + "der-parser 8.2.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.6.1", "rusticata-macros", "thiserror", "time", @@ -23772,16 +24061,16 @@ dependencies = [ [[package]] name = "x509-parser" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs", + "asn1-rs 0.6.1", "data-encoding", - "der-parser", + "der-parser 9.0.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.7.0", "rusticata-macros", "thiserror", "time", @@ -23879,13 +24168,26 @@ dependencies = [ ] [[package]] -name = "xcm-fee-payment-runtime-api" +name = "xcm-procedural" +version = "7.0.0" +dependencies = [ + "Inflector", + "proc-macro2 1.0.82", + "quote 1.0.35", + "staging-xcm", + "syn 2.0.61", + "trybuild", +] + +[[package]] +name = "xcm-runtime-apis" version = "0.1.0" dependencies = [ - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-executive", "frame-support", "frame-system", + "hex-literal", "log", "pallet-assets", "pallet-balances", @@ -23894,7 +24196,6 @@ dependencies = [ "scale-info", "sp-api", "sp-io", - "sp-runtime", "sp-std 14.0.0", "sp-weights", "staging-xcm", @@ -23902,18 +24203,6 @@ dependencies = [ "staging-xcm-executor", ] -[[package]] -name = "xcm-procedural" -version = "7.0.0" -dependencies = [ - "Inflector", - "proc-macro2 1.0.82", - "quote 1.0.35", - "staging-xcm", - "syn 2.0.61", - "trybuild", -] - [[package]] name = "xcm-simulator" version = "7.0.0" @@ -23990,17 +24279,33 @@ dependencies = [ "xcm-simulator", ] +[[package]] +name = "xml-rs" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "pin-project", + "rand", "static_assertions", ] @@ -24041,9 +24346,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 2b2a1cdc17d5ca70d876f6fc907f8e1fdd3cfd52..5c2677fffeb225888a1ac8af392c287634cf59e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,7 +226,7 @@ members = [ "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", "polkadot/xcm/xcm-executor/integration-tests", - "polkadot/xcm/xcm-fee-payment-runtime-api", + "polkadot/xcm/xcm-runtime-apis", "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", @@ -306,6 +306,7 @@ members = [ "substrate/frame/asset-conversion/ops", "substrate/frame/asset-rate", "substrate/frame/assets", + "substrate/frame/assets-freezer", "substrate/frame/atomic-swap", "substrate/frame/aura", "substrate/frame/authority-discovery", @@ -564,19 +565,787 @@ extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic [workspace.dependencies] +Inflector = { version = "0.11.4" } +aes-gcm = { version = "0.10" } +ahash = { version = "0.8.2" } +alloy-primitives = { version = "0.4.2", default-features = false } +alloy-sol-types = { version = "0.4.2", default-features = false } +always-assert = { version = "0.1" } +ansi_term = { version = "0.12.1" } +anyhow = { version = "1.0.81" } +aquamarine = { version = "0.5.0" } +arbitrary = { version = "1.3.2" } +ark-bls12-377 = { version = "0.4.0", default-features = false } +ark-bls12-377-ext = { version = "0.4.1", default-features = false } +ark-bls12-381 = { version = "0.4.0", default-features = false } +ark-bls12-381-ext = { version = "0.4.1", default-features = false } +ark-bw6-761 = { version = "0.4.0", default-features = false } +ark-bw6-761-ext = { version = "0.4.1", default-features = false } +ark-ec = { version = "0.4.2", default-features = false } +ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false } +ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false } +ark-scale = { version = "0.0.12", default-features = false } +array-bytes = { version = "6.2.2", default-features = false } +arrayvec = { version = "0.7.4" } +assert_cmd = { version = "2.0.10" } +assert_matches = { version = "1.5.0" } +asset-hub-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo" } +asset-hub-rococo-runtime = { path = "cumulus/parachains/runtimes/assets/asset-hub-rococo", default-features = false } +asset-hub-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend" } +asset-hub-westend-runtime = { path = "cumulus/parachains/runtimes/assets/asset-hub-westend" } 
+asset-test-utils = { path = "cumulus/parachains/runtimes/assets/test-utils", default-features = false } +assets-common = { path = "cumulus/parachains/runtimes/assets/common", default-features = false } +async-channel = { version = "1.8.0" } +async-std = { version = "1.9.0" } +async-trait = { version = "0.1.79" } +asynchronous-codec = { version = "0.6" } +backoff = { version = "0.4" } +backtrace = { version = "0.3.64" } +binary-merkle-tree = { path = "substrate/utils/binary-merkle-tree", default-features = false } +bincode = { version = "1.3.3" } +bip39 = { version = "2.0.0" } +bitflags = { version = "1.3.2" } +bitvec = { version = "1.0.1", default-features = false } +blake2 = { version = "0.10.4", default-features = false } +blake2b_simd = { version = "1.0.1", default-features = false } +blake3 = { version = "1.5" } +bounded-collections = { version = "0.2.0", default-features = false } +bounded-vec = { version = "0.7" } +bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "bridges/chains/chain-asset-hub-westend", default-features = false } +bp-beefy = { path = "bridges/primitives/beefy", default-features = false } +bp-bridge-hub-cumulus = { path = "bridges/chains/chain-bridge-hub-cumulus", default-features = false } +bp-bridge-hub-kusama = { default-features = false, path = "bridges/chains/chain-bridge-hub-kusama" } +bp-bridge-hub-polkadot = { path = "bridges/chains/chain-bridge-hub-polkadot", default-features = false } +bp-bridge-hub-rococo = { path = "bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "bridges/chains/chain-bridge-hub-westend", default-features = false } +bp-header-chain = { path = "bridges/primitives/header-chain", default-features = false } +bp-kusama = { default-features = false, path = "bridges/chains/chain-kusama" } +bp-messages = { path = "bridges/primitives/messages", default-features = false } +bp-parachains = { path = "bridges/primitives/parachains", default-features = false } +bp-polkadot = { default-features = false, path = "bridges/chains/chain-polkadot" } +bp-polkadot-bulletin = { path = "bridges/chains/chain-polkadot-bulletin", default-features = false } +bp-polkadot-core = { path = "bridges/primitives/polkadot-core", default-features = false } +bp-relayers = { path = "bridges/primitives/relayers", default-features = false } +bp-rococo = { path = "bridges/chains/chain-rococo", default-features = false } +bp-runtime = { path = "bridges/primitives/runtime", default-features = false } +bp-test-utils = { path = "bridges/primitives/test-utils", default-features = false } +bp-westend = { path = "bridges/chains/chain-westend", default-features = false } +bp-xcm-bridge-hub = { path = "bridges/primitives/xcm-bridge-hub", default-features = false } +bp-xcm-bridge-hub-router = { path = "bridges/primitives/xcm-bridge-hub-router", default-features = false } +bridge-hub-common = { path = "cumulus/parachains/runtimes/bridge-hubs/common", default-features = false } +bridge-hub-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo" } +bridge-hub-rococo-runtime = { path = "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +bridge-hub-test-utils = { path = "cumulus/parachains/runtimes/bridge-hubs/test-utils", default-features = false } +bridge-hub-westend-emulated-chain = { path = 
"cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend" } +bridge-hub-westend-runtime = { path = "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +bridge-runtime-common = { path = "bridges/bin/runtime-common", default-features = false } +bs58 = { version = "0.5.0", default-features = false } +build-helper = { version = "0.1.1" } +byte-slice-cast = { version = "1.2.1", default-features = false } +byteorder = { version = "1.3.2", default-features = false } +bytes = { version = "1.4.0", default-features = false } +cargo_metadata = { version = "0.15.4" } +cfg-expr = { version = "0.15.5" } +cfg-if = { version = "1.0" } +chain-spec-builder = { path = "substrate/bin/utils/chain-spec-builder", default-features = false, package = "staging-chain-spec-builder" } +chain-spec-guide-runtime = { path = "docs/sdk/src/reference_docs/chain_spec_runtime" } +chrono = { version = "0.4.31" } +cid = { version = "0.9.0" } +clap = { version = "4.5.3" } +clap-num = { version = "1.0.2" } +clap_complete = { version = "4.0.2" } +coarsetime = { version = "0.1.22" } +codec = { version = "3.6.12", default-features = false, package = "parity-scale-codec" } +collectives-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend" } +collectives-westend-runtime = { path = "cumulus/parachains/runtimes/collectives/collectives-westend" } +color-eyre = { version = "0.6.1", default-features = false } +color-print = { version = "0.3.4" } +colored = { version = "2.0.4" } +comfy-table = { version = "7.1.0", default-features = false } +console = { version = "0.15.8" } +contracts-rococo-runtime = { path = "cumulus/parachains/runtimes/contracts/contracts-rococo" } +coretime-rococo-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-rococo" } +coretime-westend-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-westend" } +cpu-time = { version = "1.0.0" } +criterion = { version = "0.5.1", default-features = false } +cumulus-client-cli = { path = "cumulus/client/cli", default-features = false } +cumulus-client-collator = { path = "cumulus/client/collator", default-features = false } +cumulus-client-consensus-aura = { path = "cumulus/client/consensus/aura", default-features = false } +cumulus-client-consensus-common = { path = "cumulus/client/consensus/common", default-features = false } +cumulus-client-consensus-proposer = { path = "cumulus/client/consensus/proposer", default-features = false } +cumulus-client-consensus-relay-chain = { path = "cumulus/client/consensus/relay-chain", default-features = false } +cumulus-client-network = { path = "cumulus/client/network", default-features = false } +cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } +cumulus-client-pov-recovery = { path = "cumulus/client/pov-recovery", default-features = false } +cumulus-client-service = { path = "cumulus/client/service", default-features = false } +cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } +cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } +cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } +cumulus-pallet-session-benchmarking = { path = 
"cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } +cumulus-pallet-xcm = { path = "cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "cumulus/pallets/xcmp-queue", default-features = false } +cumulus-ping = { path = "cumulus/parachains/pallets/ping", default-features = false } +cumulus-primitives-aura = { path = "cumulus/primitives/aura", default-features = false } +cumulus-primitives-core = { path = "cumulus/primitives/core", default-features = false } +cumulus-primitives-parachain-inherent = { path = "cumulus/primitives/parachain-inherent", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "cumulus/primitives/proof-size-hostfunction", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "cumulus/primitives/storage-weight-reclaim", default-features = false } +cumulus-primitives-timestamp = { path = "cumulus/primitives/timestamp", default-features = false } +cumulus-primitives-utility = { path = "cumulus/primitives/utility", default-features = false } +cumulus-relay-chain-inprocess-interface = { path = "cumulus/client/relay-chain-inprocess-interface", default-features = false } +cumulus-relay-chain-interface = { path = "cumulus/client/relay-chain-interface", default-features = false } +cumulus-relay-chain-minimal-node = { path = "cumulus/client/relay-chain-minimal-node", default-features = false } +cumulus-relay-chain-rpc-interface = { path = "cumulus/client/relay-chain-rpc-interface", default-features = false } +cumulus-test-client = { path = "cumulus/test/client" } +cumulus-test-relay-sproof-builder = { path = "cumulus/test/relay-sproof-builder", default-features = false } +cumulus-test-runtime = { path = "cumulus/test/runtime" } +cumulus-test-service = { path = "cumulus/test/service" } +curve25519-dalek = { version = "4.1.3" } +derivative = { version = "2.2.0", default-features = false } +derive-syn-parse = { version = "0.2.0" } +derive_more = { version = "0.99.17", default-features = false } +digest = { version = "0.10.3", default-features = false } +directories = { version = "5.0.1" } +dlmalloc = { version = "0.2.4" } +docify = { version = "0.2.8" } +dyn-clonable = { version = "0.9.0" } +dyn-clone = { version = "1.0.16" } +ed25519-dalek = { version = "2.1", default-features = false } +ed25519-zebra = { version = "4.0.3", default-features = false } +either = { version = "1.8.1", default-features = false } +emulated-integration-tests-common = { path = "cumulus/parachains/integration-tests/emulated/common", default-features = false } +enumflags2 = { version = "0.7.7" } +enumn = { version = "0.1.12" } +env_logger = { version = "0.11.3" } +environmental = { version = "1.1.4", default-features = false } +equivocation-detector = { path = "bridges/relays/equivocation" } +ethabi = { version = "1.0.0", default-features = false, package = "ethabi-decode" } +ethbloom = { version = "0.13.0", default-features = false } +ethereum-types = { version = "0.14.1", default-features = false } +exit-future = { version = "0.2.0" } +expander = { version = "2.0.0" } +fatality = { version = "0.1.1" } +fdlimit = { version = "0.3.0" } +femme = { version = "2.2.1" } +filetime = { version = "0.2.16" } +finality-grandpa = { version = "0.16.2", default-features = false } +finality-relay = { path = "bridges/relays/finality" } +flate2 = { version = "1.0" } +fnv = { version = "1.0.6" } +fork-tree = { path = 
"substrate/utils/fork-tree", default-features = false } +forwarded-header-value = { version = "0.1.1" } +fraction = { version = "0.13.1" } +frame = { path = "substrate/frame", default-features = false, package = "polkadot-sdk-frame" } +frame-benchmarking = { path = "substrate/frame/benchmarking", default-features = false } +frame-benchmarking-cli = { path = "substrate/utils/frame/benchmarking-cli", default-features = false } +frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/frame/benchmarking/pov" } +frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } +frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } +frame-executive = { path = "substrate/frame/executive", default-features = false } +frame-metadata = { version = "16.0.0", default-features = false } +frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } +frame-support = { path = "substrate/frame/support", default-features = false } +frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } +frame-support-procedural-tools = { path = "substrate/frame/support/procedural/tools", default-features = false } +frame-support-procedural-tools-derive = { path = "substrate/frame/support/procedural/tools/derive", default-features = false } +frame-support-test = { path = "substrate/frame/support/test" } +frame-system = { path = "substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "substrate/frame/system/benchmarking", default-features = false } +frame-system-rpc-runtime-api = { path = "substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "substrate/frame/try-runtime", default-features = false } +fs4 = { version = "0.7.0" } +fs_extra = { version = "1.3.0" } +futures = { version = "0.3.30" } +futures-channel = { version = "0.3.23" } +futures-timer = { version = "3.0.2" } +futures-util = { version = "0.3.30", default-features = false } +generate-bags = { path = "substrate/utils/frame/generate-bags", default-features = false } +gethostname = { version = "0.2.3" } +glob = { version = "0.3" } +glutton-westend-runtime = { path = "cumulus/parachains/runtimes/glutton/glutton-westend" } +governor = { version = "0.6.0" } +gum = { path = "polkadot/node/gum", default-features = false, package = "tracing-gum" } +gum-proc-macro = { path = "polkadot/node/gum/proc-macro", default-features = false, package = "tracing-gum-proc-macro" } +handlebars = { version = "5.1.0" } +hash-db = { version = "0.16.0", default-features = false } +hash256-std-hasher = { version = "0.15.2", default-features = false } +hex = { version = "0.4.3", default-features = false } +hex-literal = { version = "0.4.1", default-features = false } +hkdf = { version = "0.12.0" } +hmac = { version = "0.12.1" } +honggfuzz = { version = "0.5.55" } +http = { version = "1.1" } +http-body = { version = "1", default-features = false } +http-body-util = { version = "0.1.2", default-features = false } +hyper = { version = "1.3.1", default-features = false } +hyper-rustls = { version = "0.24.2" } +hyper-util = { version = "0.1.5", default-features = false } +# TODO: remove hyper v0.14 https://github.com/paritytech/polkadot-sdk/issues/4896 +hyperv14 = { package = "hyper", version = "0.14.29", default-features = false } +impl-serde = { version = "0.4.0", 
default-features = false } +impl-trait-for-tuples = { version = "0.2.2" } +indexmap = { version = "2.0.0" } +indicatif = { version = "0.17.7" } +integer-sqrt = { version = "0.1.2" } +ip_network = { version = "0.4.1" } +is-terminal = { version = "0.4.9" } +is_executable = { version = "1.0.1" } +isahc = { version = "1.2" } +itertools = { version = "0.11" } +jsonpath_lib = { version = "0.3" } +jsonrpsee = { version = "0.23.2" } +jsonrpsee-core = { version = "0.23.2" } +k256 = { version = "0.13.3", default-features = false } +kitchensink-runtime = { path = "substrate/bin/node/runtime" } +kvdb = { version = "0.13.0" } +kvdb-memorydb = { version = "0.13.0" } +kvdb-rocksdb = { version = "0.19.0" } +kvdb-shared-tests = { version = "0.11.0" } +landlock = { version = "0.3.0" } +lazy_static = { version = "1.4.0" } +libc = { version = "0.2.153" } +libfuzzer-sys = { version = "0.4" } +libp2p = { version = "0.52.4" } +libp2p-identity = { version = "0.2.3" } +libsecp256k1 = { version = "0.7.0", default-features = false } +linked-hash-map = { version = "0.5.4" } +linked_hash_set = { version = "0.1.4" } +linregress = { version = "0.5.1" } +lite-json = { version = "0.2.0", default-features = false } +litep2p = { version = "0.6.2" } +log = { version = "0.4.21", default-features = false } +macro_magic = { version = "0.5.1" } +maplit = { version = "1.0.2" } +memmap2 = { version = "0.9.3" } +memory-db = { version = "0.32.0", default-features = false } +merkleized-metadata = { version = "0.1.0" } +merlin = { version = "3.0", default-features = false } +messages-relay = { path = "bridges/relays/messages" } +metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } +mick-jaeger = { version = "0.1.8" } +milagro-bls = { version = "1.5.4", default-features = false, package = "snowbridge-milagro-bls" } +minimal-template-node = { path = "templates/minimal/node" } +minimal-template-runtime = { path = "templates/minimal/runtime" } +mixnet = { version = "0.7.0" } +mmr-gadget = { path = "substrate/client/merkle-mountain-range", default-features = false } +mmr-lib = { version = "0.5.2", package = "ckb-merkle-mountain-range" } +mmr-rpc = { path = "substrate/client/merkle-mountain-range/rpc", default-features = false } +mockall = { version = "0.11.3" } +multiaddr = { version = "0.18.1" } +multihash = { version = "0.19.1", default-features = false } +multihash-codetable = { version = "0.1.1" } +multistream-select = { version = "0.13.0" } +names = { version = "0.14.0", default-features = false } +nix = { version = "0.28.0" } +node-cli = { path = "substrate/bin/node/cli", package = "staging-node-cli" } +node-inspect = { path = "substrate/bin/node/inspect", default-features = false, package = "staging-node-inspect" } +node-primitives = { path = "substrate/bin/node/primitives", default-features = false } +node-rpc = { path = "substrate/bin/node/rpc" } +node-testing = { path = "substrate/bin/node/testing" } +nohash-hasher = { version = "0.2.0" } +novelpoly = { version = "2.0.0", package = "reed-solomon-novelpoly" } +num-bigint = { version = "0.4.3" } +num-format = { version = "0.4.3" } +num-rational = { version = "0.4.1" } +num-traits = { version = "0.2.17", default-features = false } +num_cpus = { version = "1.13.1" } +once_cell = { version = "1.19.0" } +orchestra = { version = "0.3.5", default-features = false } +pallet-alliance = { path = "substrate/frame/alliance", default-features = false } +pallet-asset-conversion = { path = "substrate/frame/asset-conversion", default-features = false 
} +pallet-asset-conversion-ops = { path = "substrate/frame/asset-conversion/ops", default-features = false } +pallet-asset-conversion-tx-payment = { path = "substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-asset-rate = { path = "substrate/frame/asset-rate", default-features = false } +pallet-asset-tx-payment = { path = "substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-assets = { path = "substrate/frame/assets", default-features = false } +pallet-assets-freezer = { path = "substrate/frame/assets-freezer", default-features = false } +pallet-atomic-swap = { default-features = false, path = "substrate/frame/atomic-swap" } +pallet-aura = { path = "substrate/frame/aura", default-features = false } +pallet-authority-discovery = { path = "substrate/frame/authority-discovery", default-features = false } +pallet-authorship = { path = "substrate/frame/authorship", default-features = false } +pallet-babe = { path = "substrate/frame/babe", default-features = false } +pallet-bags-list = { path = "substrate/frame/bags-list", default-features = false } +pallet-bags-list-remote-tests = { path = "substrate/frame/bags-list/remote-tests" } +pallet-balances = { path = "substrate/frame/balances", default-features = false } +pallet-beefy = { path = "substrate/frame/beefy", default-features = false } +pallet-beefy-mmr = { path = "substrate/frame/beefy-mmr", default-features = false } +pallet-bounties = { path = "substrate/frame/bounties", default-features = false } +pallet-bridge-grandpa = { path = "bridges/modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "bridges/modules/messages", default-features = false } +pallet-bridge-parachains = { path = "bridges/modules/parachains", default-features = false } +pallet-bridge-relayers = { path = "bridges/modules/relayers", default-features = false } +pallet-broker = { path = "substrate/frame/broker", default-features = false } +pallet-child-bounties = { path = "substrate/frame/child-bounties", default-features = false } +pallet-collator-selection = { path = "cumulus/pallets/collator-selection", default-features = false } +pallet-collective = { path = "substrate/frame/collective", default-features = false } +pallet-collective-content = { path = "cumulus/parachains/pallets/collective-content", default-features = false } +pallet-contracts = { path = "substrate/frame/contracts", default-features = false } +pallet-contracts-fixtures = { path = "substrate/frame/contracts/fixtures" } +pallet-contracts-mock-network = { default-features = false, path = "substrate/frame/contracts/mock-network" } +pallet-contracts-proc-macro = { path = "substrate/frame/contracts/proc-macro", default-features = false } +pallet-contracts-uapi = { path = "substrate/frame/contracts/uapi", default-features = false } +pallet-conviction-voting = { path = "substrate/frame/conviction-voting", default-features = false } +pallet-core-fellowship = { path = "substrate/frame/core-fellowship", default-features = false } +pallet-default-config-example = { path = "substrate/frame/examples/default-config", default-features = false } +pallet-delegated-staking = { path = "substrate/frame/delegated-staking", default-features = false } +pallet-democracy = { path = "substrate/frame/democracy", default-features = false } +pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features = false } +pallet-election-provider-multi-phase = { path = 
"substrate/frame/election-provider-multi-phase", default-features = false } +pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false } +pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false } +pallet-example-basic = { path = "substrate/frame/examples/basic", default-features = false } +pallet-example-frame-crate = { path = "substrate/frame/examples/frame-crate", default-features = false } +pallet-example-kitchensink = { path = "substrate/frame/examples/kitchensink", default-features = false } +pallet-example-mbm = { path = "substrate/frame/examples/multi-block-migrations", default-features = false } +pallet-example-offchain-worker = { path = "substrate/frame/examples/offchain-worker", default-features = false } +pallet-example-single-block-migrations = { path = "substrate/frame/examples/single-block-migrations", default-features = false } +pallet-example-split = { path = "substrate/frame/examples/split", default-features = false } +pallet-example-tasks = { path = "substrate/frame/examples/tasks", default-features = false } +pallet-examples = { path = "substrate/frame/examples" } +pallet-fast-unstake = { path = "substrate/frame/fast-unstake", default-features = false } +pallet-glutton = { path = "substrate/frame/glutton", default-features = false } +pallet-grandpa = { path = "substrate/frame/grandpa", default-features = false } +pallet-identity = { path = "substrate/frame/identity", default-features = false } +pallet-im-online = { path = "substrate/frame/im-online", default-features = false } +pallet-indices = { path = "substrate/frame/indices", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "substrate/frame/insecure-randomness-collective-flip", default-features = false } +pallet-lottery = { default-features = false, path = "substrate/frame/lottery" } +pallet-membership = { path = "substrate/frame/membership", default-features = false } +pallet-message-queue = { path = "substrate/frame/message-queue", default-features = false } +pallet-migrations = { path = "substrate/frame/migrations", default-features = false } +pallet-minimal-template = { path = "templates/minimal/pallets/template", default-features = false } +pallet-mixnet = { default-features = false, path = "substrate/frame/mixnet" } +pallet-mmr = { path = "substrate/frame/merkle-mountain-range", default-features = false } +pallet-multisig = { path = "substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = "substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "substrate/frame/nfts/runtime-api", default-features = false } +pallet-nis = { path = "substrate/frame/nis", default-features = false } +pallet-node-authorization = { default-features = false, path = "substrate/frame/node-authorization" } +pallet-nomination-pools = { path = "substrate/frame/nomination-pools", default-features = false } +pallet-nomination-pools-benchmarking = { path = "substrate/frame/nomination-pools/benchmarking", default-features = false } +pallet-nomination-pools-runtime-api = { path = "substrate/frame/nomination-pools/runtime-api", default-features = false } +pallet-offences = { path = "substrate/frame/offences", default-features = false } +pallet-offences-benchmarking = { path = "substrate/frame/offences/benchmarking", default-features 
= false } +pallet-paged-list = { path = "substrate/frame/paged-list", default-features = false } +pallet-parachain-template = { path = "templates/parachain/pallets/template", default-features = false } +pallet-parameters = { path = "substrate/frame/parameters", default-features = false } +pallet-preimage = { path = "substrate/frame/preimage", default-features = false } +pallet-proxy = { path = "substrate/frame/proxy", default-features = false } +pallet-ranked-collective = { path = "substrate/frame/ranked-collective", default-features = false } +pallet-recovery = { path = "substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "substrate/frame/referenda", default-features = false } +pallet-remark = { default-features = false, path = "substrate/frame/remark" } +pallet-root-offences = { default-features = false, path = "substrate/frame/root-offences" } +pallet-root-testing = { path = "substrate/frame/root-testing", default-features = false } +pallet-safe-mode = { default-features = false, path = "substrate/frame/safe-mode" } +pallet-salary = { path = "substrate/frame/salary", default-features = false } +pallet-scheduler = { path = "substrate/frame/scheduler", default-features = false } +pallet-scored-pool = { default-features = false, path = "substrate/frame/scored-pool" } +pallet-session = { path = "substrate/frame/session", default-features = false } +pallet-session-benchmarking = { path = "substrate/frame/session/benchmarking", default-features = false } +pallet-skip-feeless-payment = { path = "substrate/frame/transaction-payment/skip-feeless-payment", default-features = false } +pallet-society = { path = "substrate/frame/society", default-features = false } +pallet-staking = { path = "substrate/frame/staking", default-features = false } +pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false } +pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false } +pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false } +pallet-state-trie-migration = { path = "substrate/frame/state-trie-migration", default-features = false } +pallet-statement = { default-features = false, path = "substrate/frame/statement" } +pallet-sudo = { path = "substrate/frame/sudo", default-features = false } +pallet-template = { path = "templates/solochain/pallets/template", default-features = false } +pallet-timestamp = { path = "substrate/frame/timestamp", default-features = false } +pallet-tips = { path = "substrate/frame/tips", default-features = false } +pallet-transaction-payment = { path = "substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc = { path = "substrate/frame/transaction-payment/rpc", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-transaction-storage = { default-features = false, path = "substrate/frame/transaction-storage" } +pallet-treasury = { path = "substrate/frame/treasury", default-features = false } +pallet-tx-pause = { default-features = false, path = "substrate/frame/tx-pause" } +pallet-uniques = { path = "substrate/frame/uniques", default-features = false } +pallet-utility = { path = "substrate/frame/utility", default-features = false } +pallet-vesting = { path = "substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = 
"substrate/frame/whitelist", default-features = false } +pallet-xcm = { path = "polkadot/xcm/pallet-xcm", default-features = false } +pallet-xcm-benchmarks = { path = "polkadot/xcm/pallet-xcm-benchmarks", default-features = false } +pallet-xcm-bridge-hub = { path = "bridges/modules/xcm-bridge-hub", default-features = false } +pallet-xcm-bridge-hub-router = { path = "bridges/modules/xcm-bridge-hub-router", default-features = false } +parachain-info = { path = "cumulus/parachains/pallets/parachain-info", default-features = false, package = "staging-parachain-info" } +parachain-template-runtime = { path = "templates/parachain/runtime" } +parachains-common = { path = "cumulus/parachains/common", default-features = false } +parachains-relay = { path = "bridges/relays/parachains" } +parachains-runtimes-test-utils = { path = "cumulus/parachains/runtimes/test-utils", default-features = false } +parity-bytes = { version = "0.1.2", default-features = false } +parity-db = { version = "0.4.12" } +parity-util-mem = { version = "0.12.0" } +parity-wasm = { version = "0.45.0" } +parking_lot = { version = "0.12.1", default-features = false } +partial_sort = { version = "0.2.0" } +paste = { version = "1.0.14", default-features = false } +pbkdf2 = { version = "0.12.2", default-features = false } +penpal-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal" } +penpal-runtime = { path = "cumulus/parachains/runtimes/testing/penpal" } +people-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo" } +people-rococo-runtime = { path = "cumulus/parachains/runtimes/people/people-rococo" } +people-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend" } +people-westend-runtime = { path = "cumulus/parachains/runtimes/people/people-westend" } +pin-project = { version = "1.1.3" } +platforms = { version = "3.0" } +polkadot-approval-distribution = { path = "polkadot/node/network/approval-distribution", default-features = false } +polkadot-availability-bitfield-distribution = { path = "polkadot/node/network/bitfield-distribution", default-features = false } +polkadot-availability-distribution = { path = "polkadot/node/network/availability-distribution", default-features = false } +polkadot-availability-recovery = { path = "polkadot/node/network/availability-recovery", default-features = false } +polkadot-cli = { path = "polkadot/cli", default-features = false } +polkadot-collator-protocol = { path = "polkadot/node/network/collator-protocol", default-features = false } +polkadot-core-primitives = { path = "polkadot/core-primitives", default-features = false } +polkadot-dispute-distribution = { path = "polkadot/node/network/dispute-distribution", default-features = false } +polkadot-erasure-coding = { path = "polkadot/erasure-coding", default-features = false } +polkadot-gossip-support = { path = "polkadot/node/network/gossip-support", default-features = false } +polkadot-network-bridge = { path = "polkadot/node/network/bridge", default-features = false } +polkadot-node-collation-generation = { path = "polkadot/node/collation-generation", default-features = false } +polkadot-node-core-approval-voting = { path = "polkadot/node/core/approval-voting", default-features = false } +polkadot-node-core-av-store = { path = "polkadot/node/core/av-store", default-features = false } +polkadot-node-core-backing = { path = "polkadot/node/core/backing", 
default-features = false } +polkadot-node-core-bitfield-signing = { path = "polkadot/node/core/bitfield-signing", default-features = false } +polkadot-node-core-candidate-validation = { path = "polkadot/node/core/candidate-validation", default-features = false } +polkadot-node-core-chain-api = { path = "polkadot/node/core/chain-api", default-features = false } +polkadot-node-core-chain-selection = { path = "polkadot/node/core/chain-selection", default-features = false } +polkadot-node-core-dispute-coordinator = { path = "polkadot/node/core/dispute-coordinator", default-features = false } +polkadot-node-core-parachains-inherent = { path = "polkadot/node/core/parachains-inherent", default-features = false } +polkadot-node-core-prospective-parachains = { path = "polkadot/node/core/prospective-parachains", default-features = false } +polkadot-node-core-provisioner = { path = "polkadot/node/core/provisioner", default-features = false } +polkadot-node-core-pvf = { path = "polkadot/node/core/pvf", default-features = false } +polkadot-node-core-pvf-checker = { path = "polkadot/node/core/pvf-checker", default-features = false } +polkadot-node-core-pvf-common = { path = "polkadot/node/core/pvf/common", default-features = false } +polkadot-node-core-pvf-execute-worker = { path = "polkadot/node/core/pvf/execute-worker", default-features = false } +polkadot-node-core-pvf-prepare-worker = { path = "polkadot/node/core/pvf/prepare-worker", default-features = false } +polkadot-node-core-runtime-api = { path = "polkadot/node/core/runtime-api", default-features = false } +polkadot-node-jaeger = { path = "polkadot/node/jaeger", default-features = false } +polkadot-node-metrics = { path = "polkadot/node/metrics", default-features = false } +polkadot-node-network-protocol = { path = "polkadot/node/network/protocol", default-features = false } +polkadot-node-primitives = { path = "polkadot/node/primitives", default-features = false } +polkadot-node-subsystem = { path = "polkadot/node/subsystem", default-features = false } +polkadot-node-subsystem-test-helpers = { path = "polkadot/node/subsystem-test-helpers" } +polkadot-node-subsystem-types = { path = "polkadot/node/subsystem-types", default-features = false } +polkadot-node-subsystem-util = { path = "polkadot/node/subsystem-util", default-features = false } +polkadot-overseer = { path = "polkadot/node/overseer", default-features = false } +polkadot-parachain-primitives = { path = "polkadot/parachain", default-features = false } +polkadot-primitives = { path = "polkadot/primitives", default-features = false } +polkadot-primitives-test-helpers = { path = "polkadot/primitives/test-helpers" } +polkadot-rpc = { path = "polkadot/rpc", default-features = false } +polkadot-runtime-common = { path = "polkadot/runtime/common", default-features = false } +polkadot-runtime-metrics = { path = "polkadot/runtime/metrics", default-features = false } +polkadot-runtime-parachains = { path = "polkadot/runtime/parachains", default-features = false } +polkadot-sdk = { path = "umbrella", default-features = false } +polkadot-sdk-docs = { path = "docs/sdk" } +polkadot-service = { path = "polkadot/node/service", default-features = false } +polkadot-statement-distribution = { path = "polkadot/node/network/statement-distribution", default-features = false } +polkadot-statement-table = { path = "polkadot/statement-table", default-features = false } +polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } +polkadot-test-client = { path = "polkadot/node/test/client" } 
+polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } +polkadot-test-service = { path = "polkadot/node/test/service" } polkavm = "0.9.3" -polkavm-linker = "0.9.2" polkavm-derive = "0.9.1" -log = { version = "0.4.21", default-features = false } +polkavm-linker = "0.9.2" +portpicker = { version = "0.1.1" } +pretty_assertions = { version = "1.3.0" } +primitive-types = { version = "0.12.1", default-features = false } +proc-macro-crate = { version = "3.0.0" } +proc-macro-warning = { version = "1.0.0", default-features = false } +proc-macro2 = { version = "1.0.64" } +procfs = { version = "0.16.0" } +prometheus = { version = "0.13.0", default-features = false } +prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } +prometheus-parse = { version = "0.2.2" } +prost = { version = "0.12.4" } +prost-build = { version = "0.12.4" } +pyroscope = { version = "0.5.7" } +pyroscope_pprofrs = { version = "0.2.7" } +quick_cache = { version = "0.3" } +quickcheck = { version = "1.0.3", default-features = false } quote = { version = "1.0.33" } +rand = { version = "0.8.5", default-features = false } +rand_chacha = { version = "0.3.1", default-features = false } +rand_core = { version = "0.6.2" } +rand_distr = { version = "0.4.3" } +rand_pcg = { version = "0.3.1" } +rayon = { version = "1.5.1" } +rbtag = { version = "0.3" } +ref-cast = { version = "1.0.0" } +regex = { version = "1.10.2" } +relay-substrate-client = { path = "bridges/relays/client-substrate" } +relay-utils = { path = "bridges/relays/utils" } +remote-externalities = { path = "substrate/utils/frame/remote-externalities", default-features = false, package = "frame-remote-externalities" } +reqwest = { version = "0.11", default-features = false } +rlp = { version = "0.5.2", default-features = false } +rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/rococo" } +rococo-parachain-runtime = { path = "cumulus/parachains/runtimes/testing/rococo-parachain" } +rococo-runtime = { path = "polkadot/runtime/rococo" } +rococo-runtime-constants = { path = "polkadot/runtime/rococo/constants", default-features = false } +rococo-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/rococo-system" } +rococo-westend-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system" } +rpassword = { version = "7.0.0" } +rstest = { version = "0.18.2" } +rustc-hash = { version = "1.1.0" } +rustc-hex = { version = "2.1.0", default-features = false } +rustix = { version = "0.36.7", default-features = false } +rustversion = { version = "1.0.6" } +rusty-fork = { version = "0.3.0", default-features = false } +safe-mix = { version = "1.0", default-features = false } +sc-allocator = { path = "substrate/client/allocator", default-features = false } +sc-authority-discovery = { path = "substrate/client/authority-discovery", default-features = false } +sc-basic-authorship = { path = "substrate/client/basic-authorship", default-features = false } +sc-block-builder = { path = "substrate/client/block-builder", default-features = false } +sc-chain-spec = { path = "substrate/client/chain-spec", default-features = false } +sc-chain-spec-derive = { path = "substrate/client/chain-spec/derive", default-features = false } +sc-cli = { path = "substrate/client/cli", default-features = false } +sc-client-api = { path = "substrate/client/api", default-features = false } +sc-client-db = 
{ path = "substrate/client/db", default-features = false } +sc-consensus = { path = "substrate/client/consensus/common", default-features = false } +sc-consensus-aura = { path = "substrate/client/consensus/aura", default-features = false } +sc-consensus-babe = { path = "substrate/client/consensus/babe", default-features = false } +sc-consensus-babe-rpc = { path = "substrate/client/consensus/babe/rpc", default-features = false } +sc-consensus-beefy = { path = "substrate/client/consensus/beefy", default-features = false } +sc-consensus-beefy-rpc = { path = "substrate/client/consensus/beefy/rpc", default-features = false } +sc-consensus-epochs = { path = "substrate/client/consensus/epochs", default-features = false } +sc-consensus-grandpa = { path = "substrate/client/consensus/grandpa", default-features = false } +sc-consensus-grandpa-rpc = { path = "substrate/client/consensus/grandpa/rpc", default-features = false } +sc-consensus-manual-seal = { path = "substrate/client/consensus/manual-seal", default-features = false } +sc-consensus-pow = { path = "substrate/client/consensus/pow", default-features = false } +sc-consensus-slots = { path = "substrate/client/consensus/slots", default-features = false } +sc-executor = { path = "substrate/client/executor", default-features = false } +sc-executor-common = { path = "substrate/client/executor/common", default-features = false } +sc-executor-polkavm = { path = "substrate/client/executor/polkavm", default-features = false } +sc-executor-wasmtime = { path = "substrate/client/executor/wasmtime", default-features = false } +sc-informant = { path = "substrate/client/informant", default-features = false } +sc-keystore = { path = "substrate/client/keystore", default-features = false } +sc-mixnet = { path = "substrate/client/mixnet", default-features = false } +sc-network = { path = "substrate/client/network", default-features = false } +sc-network-common = { path = "substrate/client/network/common", default-features = false } +sc-network-gossip = { path = "substrate/client/network-gossip", default-features = false } +sc-network-light = { path = "substrate/client/network/light", default-features = false } +sc-network-statement = { default-features = false, path = "substrate/client/network/statement" } +sc-network-sync = { path = "substrate/client/network/sync", default-features = false } +sc-network-test = { path = "substrate/client/network/test" } +sc-network-transactions = { path = "substrate/client/network/transactions", default-features = false } +sc-network-types = { path = "substrate/client/network/types", default-features = false } +sc-offchain = { path = "substrate/client/offchain", default-features = false } +sc-proposer-metrics = { path = "substrate/client/proposer-metrics", default-features = false } +sc-rpc = { path = "substrate/client/rpc", default-features = false } +sc-rpc-api = { path = "substrate/client/rpc-api", default-features = false } +sc-rpc-server = { path = "substrate/client/rpc-servers", default-features = false } +sc-rpc-spec-v2 = { path = "substrate/client/rpc-spec-v2", default-features = false } +sc-runtime-test = { path = "substrate/client/executor/runtime-test" } +sc-service = { path = "substrate/client/service", default-features = false } +sc-service-test = { path = "substrate/client/service/test" } +sc-state-db = { path = "substrate/client/state-db", default-features = false } +sc-statement-store = { default-features = false, path = "substrate/client/statement-store" } +sc-storage-monitor = { path = 
"substrate/client/storage-monitor", default-features = false } +sc-sync-state-rpc = { path = "substrate/client/sync-state-rpc", default-features = false } +sc-sysinfo = { path = "substrate/client/sysinfo", default-features = false } +sc-telemetry = { path = "substrate/client/telemetry", default-features = false } +sc-tracing = { path = "substrate/client/tracing", default-features = false } +sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default-features = false } +sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } +sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } +sc-utils = { path = "substrate/client/utils", default-features = false } +scale-info = { version = "2.11.1", default-features = false } +schemars = { version = "0.8.13", default-features = false } +schnellru = { version = "0.2.1" } +schnorrkel = { version = "0.11.4", default-features = false } +seccompiler = { version = "0.4.0" } +secp256k1 = { version = "0.28.0", default-features = false } +secrecy = { version = "0.8.0", default-features = false } +seedling-runtime = { path = "cumulus/parachains/runtimes/starters/seedling" } +separator = { version = "0.4.1" } serde = { version = "1.0.197", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } serde_json = { version = "1.0.114", default-features = false } serde_yaml = { version = "0.9" } +serial_test = { version = "2.0.0" } +sha1 = { version = "0.10.6" } +sha2 = { version = "0.10.7", default-features = false } +sha3 = { version = "0.10.0", default-features = false } +shell-runtime = { path = "cumulus/parachains/runtimes/starters/shell" } +slot-range-helper = { path = "polkadot/runtime/common/slot_range_helper", default-features = false } +slotmap = { version = "1.0" } +smallvec = { version = "1.11.0", default-features = false } +smoldot = { version = "0.11.0", default-features = false } +smoldot-light = { version = "0.9.0", default-features = false } +snowbridge-beacon-primitives = { path = "bridges/snowbridge/primitives/beacon", default-features = false } +snowbridge-core = { path = "bridges/snowbridge/primitives/core", default-features = false } +snowbridge-ethereum = { path = "bridges/snowbridge/primitives/ethereum", default-features = false } +snowbridge-outbound-queue-merkle-tree = { path = "bridges/snowbridge/pallets/outbound-queue/merkle-tree", default-features = false } +snowbridge-outbound-queue-runtime-api = { path = "bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } +snowbridge-pallet-ethereum-client = { path = "bridges/snowbridge/pallets/ethereum-client", default-features = false } +snowbridge-pallet-ethereum-client-fixtures = { path = "bridges/snowbridge/pallets/ethereum-client/fixtures", default-features = false } +snowbridge-pallet-inbound-queue = { path = "bridges/snowbridge/pallets/inbound-queue", default-features = false } +snowbridge-pallet-inbound-queue-fixtures = { path = "bridges/snowbridge/pallets/inbound-queue/fixtures", default-features = false } +snowbridge-pallet-outbound-queue = { path = "bridges/snowbridge/pallets/outbound-queue", default-features = false } +snowbridge-pallet-system = { path = "bridges/snowbridge/pallets/system", default-features = false } +snowbridge-router-primitives = { path = "bridges/snowbridge/primitives/router", default-features = false } +snowbridge-runtime-common = { path = "bridges/snowbridge/runtime/runtime-common", 
default-features = false } +snowbridge-runtime-test-common = { path = "bridges/snowbridge/runtime/test-common", default-features = false } +snowbridge-system-runtime-api = { path = "bridges/snowbridge/pallets/system/runtime-api", default-features = false } +soketto = { version = "0.7.1" } +solochain-template-runtime = { path = "templates/solochain/runtime" } +sp-api = { path = "substrate/primitives/api", default-features = false } +sp-api-proc-macro = { path = "substrate/primitives/api/proc-macro", default-features = false } +sp-application-crypto = { path = "substrate/primitives/application-crypto", default-features = false } +sp-arithmetic = { path = "substrate/primitives/arithmetic", default-features = false } +sp-authority-discovery = { path = "substrate/primitives/authority-discovery", default-features = false } +sp-block-builder = { path = "substrate/primitives/block-builder", default-features = false } +sp-blockchain = { path = "substrate/primitives/blockchain", default-features = false } +sp-consensus = { path = "substrate/primitives/consensus/common", default-features = false } +sp-consensus-aura = { path = "substrate/primitives/consensus/aura", default-features = false } +sp-consensus-babe = { path = "substrate/primitives/consensus/babe", default-features = false } +sp-consensus-beefy = { path = "substrate/primitives/consensus/beefy", default-features = false } +sp-consensus-grandpa = { path = "substrate/primitives/consensus/grandpa", default-features = false } +sp-consensus-pow = { path = "substrate/primitives/consensus/pow", default-features = false } +sp-consensus-sassafras = { path = "substrate/primitives/consensus/sassafras", default-features = false } +sp-consensus-slots = { path = "substrate/primitives/consensus/slots", default-features = false } +sp-core = { path = "substrate/primitives/core", default-features = false } +sp-core-hashing = { default-features = false, path = "substrate/deprecated/hashing" } +sp-core-hashing-proc-macro = { default-features = false, path = "substrate/deprecated/hashing/proc-macro" } +sp-crypto-ec-utils = { default-features = false, path = "substrate/primitives/crypto/ec-utils" } +sp-crypto-hashing = { path = "substrate/primitives/crypto/hashing", default-features = false } +sp-crypto-hashing-proc-macro = { path = "substrate/primitives/crypto/hashing/proc-macro", default-features = false } +sp-database = { path = "substrate/primitives/database", default-features = false } +sp-debug-derive = { path = "substrate/primitives/debug-derive", default-features = false } +sp-externalities = { path = "substrate/primitives/externalities", default-features = false } +sp-genesis-builder = { path = "substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "substrate/primitives/inherents", default-features = false } +sp-io = { path = "substrate/primitives/io", default-features = false } +sp-keyring = { path = "substrate/primitives/keyring", default-features = false } +sp-keystore = { path = "substrate/primitives/keystore", default-features = false } +sp-maybe-compressed-blob = { path = "substrate/primitives/maybe-compressed-blob", default-features = false } +sp-metadata-ir = { path = "substrate/primitives/metadata-ir", default-features = false } +sp-mixnet = { path = "substrate/primitives/mixnet", default-features = false } +sp-mmr-primitives = { path = "substrate/primitives/merkle-mountain-range", default-features = false } +sp-npos-elections = { path = "substrate/primitives/npos-elections", default-features = false } 
+sp-offchain = { path = "substrate/primitives/offchain", default-features = false } +sp-panic-handler = { path = "substrate/primitives/panic-handler", default-features = false } +sp-rpc = { path = "substrate/primitives/rpc", default-features = false } +sp-runtime = { path = "substrate/primitives/runtime", default-features = false } +sp-runtime-interface = { path = "substrate/primitives/runtime-interface", default-features = false } +sp-runtime-interface-proc-macro = { path = "substrate/primitives/runtime-interface/proc-macro", default-features = false } +sp-runtime-interface-test-wasm = { path = "substrate/primitives/runtime-interface/test-wasm" } +sp-runtime-interface-test-wasm-deprecated = { path = "substrate/primitives/runtime-interface/test-wasm-deprecated" } +sp-session = { path = "substrate/primitives/session", default-features = false } +sp-staking = { path = "substrate/primitives/staking", default-features = false } +sp-state-machine = { path = "substrate/primitives/state-machine", default-features = false } +sp-statement-store = { path = "substrate/primitives/statement-store", default-features = false } +sp-std = { path = "substrate/primitives/std", default-features = false } +sp-storage = { path = "substrate/primitives/storage", default-features = false } +sp-test-primitives = { path = "substrate/primitives/test-primitives" } +sp-timestamp = { path = "substrate/primitives/timestamp", default-features = false } +sp-tracing = { path = "substrate/primitives/tracing", default-features = false } +sp-transaction-pool = { path = "substrate/primitives/transaction-pool", default-features = false } +sp-transaction-storage-proof = { path = "substrate/primitives/transaction-storage-proof", default-features = false } +sp-trie = { path = "substrate/primitives/trie", default-features = false } +sp-version = { path = "substrate/primitives/version", default-features = false } +sp-version-proc-macro = { path = "substrate/primitives/version/proc-macro", default-features = false } +sp-wasm-interface = { path = "substrate/primitives/wasm-interface", default-features = false } +sp-weights = { path = "substrate/primitives/weights", default-features = false } +spinners = { version = "4.1.0" } +ss58-registry = { version = "1.34.0", default-features = false } +ssz_rs = { version = "0.9.0", default-features = false } +ssz_rs_derive = { version = "0.9.0", default-features = false } +static_assertions = { version = "1.1.0", default-features = false } +static_init = { version = "1.0.3" } +structopt = { version = "0.3" } +strum = { version = "0.26.2", default-features = false } +subkey = { path = "substrate/bin/utils/subkey", default-features = false } +substrate-bip39 = { path = "substrate/utils/substrate-bip39", default-features = false } +substrate-build-script-utils = { path = "substrate/utils/build-script-utils", default-features = false } +substrate-cli-test-utils = { path = "substrate/test-utils/cli" } +substrate-frame-rpc-support = { default-features = false, path = "substrate/utils/frame/rpc/support" } +substrate-frame-rpc-system = { path = "substrate/utils/frame/rpc/system", default-features = false } +substrate-rpc-client = { path = "substrate/utils/frame/rpc/client", default-features = false } +substrate-state-trie-migration-rpc = { path = "substrate/utils/frame/rpc/state-trie-migration-rpc", default-features = false } +substrate-test-client = { path = "substrate/test-utils/client" } +substrate-test-runtime = { path = "substrate/test-utils/runtime" } +substrate-test-runtime-client = { path = 
"substrate/test-utils/runtime/client" } +substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } +substrate-test-utils = { path = "substrate/test-utils" } +substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } syn = { version = "2.0.53" } +sysinfo = { version = "0.30" } +tar = { version = "0.4" } +tempfile = { version = "3.8.1" } +test-log = { version = "0.2.14" } +test-pallet = { path = "substrate/frame/support/test/pallet", default-features = false, package = "frame-support-test-pallet" } +test-parachain-adder = { path = "polkadot/parachain/test-parachains/adder" } +test-parachain-halt = { path = "polkadot/parachain/test-parachains/halt" } +test-parachain-undying = { path = "polkadot/parachain/test-parachains/undying" } +test-runtime-constants = { path = "polkadot/runtime/test-runtime/constants", default-features = false } +testnet-parachains-constants = { path = "cumulus/parachains/runtimes/constants", default-features = false } thiserror = { version = "1.0.48" } +thousands = { version = "0.2.0" } +threadpool = { version = "1.7" } +tikv-jemalloc-ctl = { version = "0.5.0" } +tikv-jemallocator = { version = "0.5.0" } +time = { version = "0.3" } +tiny-keccak = { version = "2.0.2" } +tokio = { version = "1.37.0", default-features = false } +tokio-retry = { version = "0.3.0" } +tokio-stream = { version = "0.1.14" } +tokio-test = { version = "0.4.2" } +tokio-tungstenite = { version = "0.20.1" } +tokio-util = { version = "0.7.8" } +toml = { version = "0.8.8" } +toml_edit = { version = "0.19" } +tower = { version = "0.4.13" } +tower-http = { version = "0.5.2" } +tracing = { version = "0.1.37", default-features = false } +tracing-core = { version = "0.1.32", default-features = false } +tracing-futures = { version = "0.2.4" } +tracing-log = { version = "0.2.0" } tracing-subscriber = { version = "0.3.18" } +tracking-allocator = { path = "polkadot/node/tracking-allocator", default-features = false, package = "staging-tracking-allocator" } +trie-bench = { version = "0.39.0" } +trie-db = { version = "0.29.0", default-features = false } +trie-root = { version = "0.18.0", default-features = false } +trie-standardmap = { version = "0.16.0" } +trybuild = { version = "1.0.88" } +tt-call = { version = "1.0.8" } +tuplex = { version = "0.1", default-features = false } +twox-hash = { version = "1.6.3", default-features = false } +unsigned-varint = { version = "0.7.2" } +url = { version = "2.4.0" } +void = { version = "1.0.2" } +w3f-bls = { version = "0.1.3", default-features = false } +wait-timeout = { version = "0.2" } +walkdir = { version = "2.4.0" } +wasm-bindgen-test = { version = "0.3.19" } +wasm-instrument = { version = "0.4", default-features = false } +wasm-opt = { version = "0.116" } +wasm-timer = { version = "0.2.5" } +wasmi = { version = "0.32.3", default-features = false } +wasmtime = { version = "8.0.1", default-features = false } +wat = { version = "1.0.0" } +westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/westend", default-features = false } +westend-runtime = { path = "polkadot/runtime/westend" } +westend-runtime-constants = { path = "polkadot/runtime/westend/constants", default-features = false } +westend-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/westend-system" } +x25519-dalek = { version = "2.0" } +xcm = { path = "polkadot/xcm", default-features = false, package = "staging-xcm" } +xcm-builder = { path = 
"polkadot/xcm/xcm-builder", default-features = false, package = "staging-xcm-builder" } +xcm-docs = { path = "polkadot/xcm/docs" } +xcm-emulator = { path = "cumulus/xcm/xcm-emulator", default-features = false } +xcm-executor = { path = "polkadot/xcm/xcm-executor", default-features = false, package = "staging-xcm-executor" } +xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } +xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } +xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } +zeroize = { version = "1.7.0", default-features = false } +zstd = { version = "0.12.4", default-features = false } [profile.release] # Polkadot runtime requires unwinding. diff --git a/README.md b/README.md index 0b027b2958c1510ddf2335ec8a815ee89f9330be..92901d070db0854dd4b1daef8c3ed7a3fd3893f2 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,12 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) ## ๐Ÿ“š Documentation * [๐Ÿฆ€ rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html) - * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) - to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) + to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), + namely how to build your first FRAME pallet. + * [Templates](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/templates/index.html) + for starting a new project. * Other Resources: * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) @@ -39,6 +43,9 @@ The Polkadot-SDK has two release channels: `stable` and `nightly`. Production so only use `stable`. `nightly` is meant for tinkerers to try out the latest features. The detailed release process is described in [RELEASE.md](docs/RELEASE.md). +You can use [`psvm`](https://github.com/paritytech/psvm) to manage your Polkadot-SDK dependency +versions in downstream projects. + ### ๐Ÿ˜Œ Stable `stable` releases have a support duration of **three months**. 
In this period, the release will not diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 783009a8c890768bcc85dafec14dc3da9e8da573..36f27b6aa0358fcb8027bbfe6e571bc1a50962e6 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,48 +11,44 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hash-db = { version = "0.16.0", default-features = false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -static_assertions = { version = "1.1", optional = true } -tuplex = { version = "0.1", default-features = false } +scale-info = { features = ["derive"], workspace = true } +static_assertions = { optional = true, workspace = true, default-features = true } +tuplex = { workspace = true } # Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } -pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../modules/relayers", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +bp-xcm-bridge-hub = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } # Substrate dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } 
+frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-utility = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { optional = true, workspace = true } # Polkadot dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { path = "../../../substrate/frame/balances" } +bp-test-utils = { workspace = true } +pallet-balances = { workspace = true } +pallet-bridge-messages = { features = ["std", "test-helpers"], workspace = true } [features] default = ["std"] @@ -63,13 +59,14 @@ std = [ "bp-polkadot-core/std", "bp-relayers/std", "bp-runtime/std", + "bp-test-utils/std", "bp-xcm-bridge-hub-router/std", "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", "frame-system/std", - "hash-db/std", "log/std", + "pallet-balances/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-bridge-parachains/std", @@ -77,8 +74,6 @@ std = [ "pallet-transaction-payment/std", "pallet-utility/std", "scale-info/std", - "sp-api/std", - "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", @@ -88,15 +83,22 @@ std = [ "xcm/std", ] runtime-benchmarks = [ + "bp-runtime/test-helpers", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-bridge-grandpa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", + "pallet-bridge-messages/test-helpers", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "sp-trie", "xcm-builder/runtime-benchmarks", ] integrity-test = ["static_assertions"] +test-helpers = [ + "bp-runtime/test-helpers", + "sp-trie", +] diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 2c152aef68226aee36e791a882b5859427a9a33d..df75092af6e8b89fb76003cf0bfff508214ea825 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -36,6 +36,12 @@ use sp_runtime::{ transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder}, }; +// Re-export to avoid include tuplex dependency everywhere. +#[doc(hidden)] +pub mod __private { + pub use tuplex; +} + /// A duplication of the `FilterCall` trait. /// /// We need this trait in order to be able to implement it for the messages pallet, @@ -313,7 +319,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { info: &sp_runtime::traits::DispatchInfoOf, len: usize, ) -> Result { - use tuplex::PushBack; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PushBack; let to_post_dispatch = (); $( let (from_validate, call_filter_validity) = < @@ -336,7 +342,7 @@ macro_rules! 
generate_bridge_reject_obsolete_headers_and_messages { len: usize, result: &sp_runtime::DispatchResult, ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - use tuplex::PopFront; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PopFront; let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; let has_failed = result.is_err(); $( diff --git a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs index 92810290f95e77a7fdc04cafaa1e6ab290e1661a..9f559dc13b64d3912f0d1679c21fa682034bdb8e 100644 --- a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs @@ -319,6 +319,7 @@ mod integrity_tests { pub mod per_message { use super::*; + use bp_messages::ChainWithMessages; use pallet_bridge_messages::WeightInfoExt; /// Ensures that the value of `PriorityBoostPerMessage` matches the value of @@ -339,7 +340,7 @@ mod integrity_tests { BalanceOf: Send + Sync + FixedPointOperand, { let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + Runtime::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; super::ensure_priority_boost_is_sane::>( "PriorityBoostPerMessage", maximal_messages_in_delivery_transaction, diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index 5aa7f1c095d540a4ee5050aeb7d694c98b744683..6ba3506377d0e602bf9ee706b13c248efd6afaca 100644 --- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -22,9 +22,9 @@ use crate::messages_call_ext::{ CallHelper as MessagesCallHelper, CallInfo as MessagesCallInfo, MessagesCallSubType, }; -use bp_messages::{LaneId, MessageNonce}; +use bp_messages::{ChainWithMessages, LaneId, MessageNonce}; use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{Parachain, RangeInclusiveExt, StaticStrProvider}; +use bp_runtime::{Chain, Parachain, RangeInclusiveExt, StaticStrProvider}; use codec::{Codec, Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo}, @@ -293,7 +293,7 @@ pub trait RefundSignedExtension: ::Id::get(), ::Instance, - >>::BridgedChainId::get(), + >>::BridgedChain::ID, if call_info.is_receive_messages_proof_call() { RewardsAccountOwner::ThisChain } else { @@ -406,8 +406,7 @@ pub trait RefundSignedExtension: // a quick check to avoid invalid high-priority transactions let max_unconfirmed_messages_in_confirmation_tx = ::Instance, - >>::MaxUnconfirmedMessagesAtInboundLane::get( - ); + >>::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; if bundled_messages > max_unconfirmed_messages_in_confirmation_tx { return None } @@ -935,9 +934,6 @@ where pub(crate) mod tests { use super::*; use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, messages_call_ext::{ BaseMessagesProofInfo, ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, UnrewardedRelayerOccupation, @@ -946,8 +942,10 @@ pub(crate) mod tests { }; use bp_header_chain::StoredHeaderDataBuilder; use bp_messages::{ - DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, - UnrewardedRelayer, UnrewardedRelayersState, + 
source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, DeliveredMessages, InboundLaneData, + MessageNonce, MessagesOperatingMode, OutboundLaneData, UnrewardedRelayer, + UnrewardedRelayersState, }; use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; @@ -1123,7 +1121,7 @@ pub(crate) mod tests { ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, }) } @@ -1136,7 +1134,7 @@ pub(crate) mod tests { ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: false, }) } @@ -1144,9 +1142,9 @@ pub(crate) mod tests { fn message_delivery_call(best_message: MessageNonce) -> RuntimeCall { RuntimeCall::BridgeMessages(MessagesCall::receive_messages_proof { relayer_id_at_bridged_chain: relayer_account_at_bridged_chain(), - proof: FromBridgedChainMessagesProof { + proof: Box::new(FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: TestLaneId::get(), nonces_start: pallet_bridge_messages::InboundLanes::::get( TEST_LANE_ID, @@ -1154,7 +1152,7 @@ pub(crate) mod tests { .last_delivered_nonce() + 1, nonces_end: best_message, - }, + }), messages_count: 1, dispatch_weight: Weight::zero(), }) @@ -1164,7 +1162,7 @@ pub(crate) mod tests { RuntimeCall::BridgeMessages(MessagesCall::receive_messages_delivery_proof { proof: FromBridgedChainMessagesDeliveryProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: TestLaneId::get(), }, relayers_state: UnrewardedRelayersState { @@ -1327,8 +1325,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1397,8 +1397,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1459,8 +1461,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1499,8 +1503,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - 
free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }, )), @@ -1735,14 +1741,16 @@ pub(crate) mod tests { let fns = [run_validate, run_grandpa_validate, run_messages_validate]; for f in fns { - let priority_of_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get())) - .unwrap() - .priority; - let priority_of_more_than_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1)) - .unwrap() - .priority; + let priority_of_max_messages_delivery = f(message_delivery_call( + 100 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + )) + .unwrap() + .priority; + let priority_of_more_than_max_messages_delivery = f(message_delivery_call( + 100 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX + 1, + )) + .unwrap() + .priority; assert!( priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, @@ -2103,7 +2111,7 @@ pub(crate) mod tests { [1u8; 32].into(), ), ], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, }), message_delivery_call(200), ], @@ -2865,7 +2873,8 @@ pub(crate) mod tests { #[test] fn does_not_panic_on_boosting_priority_of_empty_message_delivery_transaction() { run_test(|| { - let best_delivered_message = MaxUnconfirmedMessagesAtInboundLane::get(); + let best_delivered_message = + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; initialize_environment(100, 100, best_delivered_message); // register relayer so it gets priority boost diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index d3827a14dd6cc24e088a8d05d26aba9d769eb213..f661db8a22057ecf6c8b217336e5a98660344cb2 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -19,10 +19,9 @@ //! Most of the tests in this module assume that the bridge is using standard (see `crate::messages` //! module for details) configuration. -use crate::{messages, messages::MessageBridge}; - -use bp_messages::{InboundLaneData, MessageNonce}; -use bp_runtime::{Chain, ChainId}; +use bp_header_chain::ChainWithGrandpa; +use bp_messages::{ChainWithMessages, InboundLaneData, MessageNonce}; +use bp_runtime::Chain; use codec::Encode; use frame_support::{storage::generator::StorageValue, traits::Get, weights::Weight}; use frame_system::limits; @@ -50,23 +49,6 @@ macro_rules! assert_chain_types( } ); -/// Macro that ensures that the bridge GRANDPA pallet is configured properly to bridge with given -/// chain. -#[macro_export] -macro_rules! 
assert_bridge_grandpa_pallet_types( - ( runtime: $r:path, with_bridged_chain_grandpa_instance: $i:path, bridged_chain: $bridged:path ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use pallet_bridge_grandpa::Config as GrandpaConfig; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as GrandpaConfig<$i>>::BridgedChain, $bridged); - } - } -); - /// Macro that ensures that the bridge messages pallet is configured properly to bridge using given /// configuration. #[macro_export] @@ -74,32 +56,30 @@ macro_rules! assert_bridge_messages_pallet_types( ( runtime: $r:path, with_bridged_chain_messages_instance: $i:path, - bridge: $bridge:path + this_chain: $this:path, + bridged_chain: $bridged:path, ) => { { // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard // configuration is used), or something has broke existing configuration (meaning that all bridged chains // and relays will stop functioning) - use $crate::messages::{ - source::{FromThisChainMessagePayload, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - AccountIdOf, BalanceOf, BridgedChain, ThisChain, - }; + use $crate::messages_xcm_extension::XcmAsPlainPayload; + use bp_messages::ChainWithMessages; + use bp_runtime::Chain; use pallet_bridge_messages::Config as MessagesConfig; use static_assertions::assert_type_eq_all; - assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, FromThisChainMessagePayload); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::ThisChain, $this); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::BridgedChain, $bridged); - assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundRelayer, AccountIdOf>); - - assert_type_eq_all!(<$r as MessagesConfig<$i>>::TargetHeaderChain, TargetHeaderChainAdapter<$bridge>); - assert_type_eq_all!(<$r as MessagesConfig<$i>>::SourceHeaderChain, SourceHeaderChainAdapter<$bridge>); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, XcmAsPlainPayload); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundPayload, XcmAsPlainPayload); } } ); /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, -/// `assert_bridge_grandpa_pallet_types` and `assert_bridge_messages_pallet_types`. It may be used +/// and `assert_bridge_messages_pallet_types`. It may be used /// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA /// and messages pallets deployed). #[macro_export] @@ -108,20 +88,15 @@ macro_rules! assert_complete_bridge_types( runtime: $r:path, with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, - bridge: $bridge:path, this_chain: $this:path, bridged_chain: $bridged:path, ) => { $crate::assert_chain_types!(runtime: $r, this_chain: $this); - $crate::assert_bridge_grandpa_pallet_types!( - runtime: $r, - with_bridged_chain_grandpa_instance: $gi, - bridged_chain: $bridged - ); $crate::assert_bridge_messages_pallet_types!( runtime: $r, with_bridged_chain_messages_instance: $mi, - bridge: $bridge + this_chain: $this, + bridged_chain: $bridged, ); } ); @@ -184,20 +159,8 @@ where ); } -/// Parameters for asserting messages pallet constants. 
-#[derive(Debug)] -pub struct AssertBridgeMessagesPalletConstants { - /// Maximal number of unrewarded relayer entries in a confirmation transaction at the bridged - /// chain. - pub max_unrewarded_relayers_in_bridged_confirmation_tx: MessageNonce, - /// Maximal number of unconfirmed messages in a confirmation transaction at the bridged chain. - pub max_unconfirmed_messages_in_bridged_confirmation_tx: MessageNonce, - /// Identifier of the bridged chain. - pub bridged_chain_id: ChainId, -} - /// Test that the constants, used in messages pallet configuration are valid. -pub fn assert_bridge_messages_pallet_constants(params: AssertBridgeMessagesPalletConstants) +pub fn assert_bridge_messages_pallet_constants() where R: pallet_bridge_messages::Config, MI: 'static, @@ -207,27 +170,22 @@ where "ActiveOutboundLanes ({:?}) must not be empty", R::ActiveOutboundLanes::get(), ); + assert!( - R::MaxUnrewardedRelayerEntriesAtInboundLane::get() <= params.max_unrewarded_relayers_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnrewardedRelayerEntriesAtInboundLane::get(), - params.max_unrewarded_relayers_in_bridged_confirmation_tx, - ); - assert!( - R::MaxUnconfirmedMessagesAtInboundLane::get() <= params.max_unconfirmed_messages_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnconfirmedMessagesAtInboundLane::get(), - params.max_unconfirmed_messages_in_bridged_confirmation_tx, + pallet_bridge_messages::BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX + <= pallet_bridge_messages::BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + "MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX ({}) of {:?} is larger than \ + its MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX ({}). This makes \ + no sense", + pallet_bridge_messages::BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + pallet_bridge_messages::BridgedChainOf::::ID, + pallet_bridge_messages::BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, ); - assert_eq!(R::BridgedChainId::get(), params.bridged_chain_id); } /// Parameters for asserting bridge pallet names. #[derive(Debug)] pub struct AssertBridgePalletNames<'a> { - /// Name of the messages pallet, deployed at the bridged chain and used to bridge with this - /// chain. - pub with_this_chain_messages_pallet_name: &'a str, /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, @@ -238,18 +196,22 @@ pub struct AssertBridgePalletNames<'a> { /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. 
-pub fn assert_bridge_pallet_names(params: AssertBridgePalletNames) +fn assert_bridge_pallet_names(params: AssertBridgePalletNames) where - B: MessageBridge, R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, GI: 'static, MI: 'static, { - assert_eq!(B::BRIDGED_MESSAGES_PALLET_NAME, params.with_this_chain_messages_pallet_name); + // check that the bridge GRANDPA pallet has required name assert_eq!( pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key(params.with_bridged_chain_grandpa_pallet_name, "PalletOwner",).0, + bp_runtime::storage_value_key( + params.with_bridged_chain_grandpa_pallet_name, + "PalletOwner", + ).0, ); + + // check that the bridge messages pallet has required name assert_eq!( pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), bp_runtime::storage_value_key( @@ -262,35 +224,58 @@ where /// Parameters for asserting complete standard messages bridge. #[derive(Debug)] -pub struct AssertCompleteBridgeConstants<'a> { +pub struct AssertCompleteBridgeConstants { /// Parameters to assert this chain constants. pub this_chain_constants: AssertChainConstants, - /// Parameters to assert messages pallet constants. - pub messages_pallet_constants: AssertBridgeMessagesPalletConstants, - /// Parameters to assert pallet names constants. - pub pallet_names: AssertBridgePalletNames<'a>, } -/// All bridge-related constants tests for the complete standard messages bridge (i.e. with bridge -/// GRANDPA and messages pallets deployed). -pub fn assert_complete_bridge_constants(params: AssertCompleteBridgeConstants) -where +/// All bridge-related constants tests for the complete standard relay-chain messages bridge +/// (i.e. with bridge GRANDPA and messages pallets deployed). +pub fn assert_complete_with_relay_chain_bridge_constants( + params: AssertCompleteBridgeConstants, +) where + R: frame_system::Config + + pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config, + GI: 'static, + MI: 'static, +{ + assert_chain_constants::(params.this_chain_constants); + assert_bridge_grandpa_pallet_constants::(); + assert_bridge_messages_pallet_constants::(); + assert_bridge_pallet_names::(AssertBridgePalletNames { + with_bridged_chain_grandpa_pallet_name: + >::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); +} + +/// All bridge-related constants tests for the complete standard parachain messages bridge +/// (i.e. with bridge GRANDPA, parachains and messages pallets deployed). +pub fn assert_complete_with_parachain_bridge_constants( + params: AssertCompleteBridgeConstants, +) where R: frame_system::Config + pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, GI: 'static, MI: 'static, - B: MessageBridge, + RelayChain: ChainWithGrandpa, { assert_chain_constants::(params.this_chain_constants); assert_bridge_grandpa_pallet_constants::(); - assert_bridge_messages_pallet_constants::(params.messages_pallet_constants); - assert_bridge_pallet_names::(params.pallet_names); + assert_bridge_messages_pallet_constants::(); + assert_bridge_pallet_names::(AssertBridgePalletNames { + with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); } /// Check that the message lane weights are correct. 
pub fn check_message_lane_weights< - C: Chain, + C: ChainWithMessages, T: frame_system::Config + pallet_bridge_messages::Config, MessagesPalletInstance: 'static, >( @@ -308,14 +293,20 @@ pub fn check_message_lane_weights< // check basic weight assumptions pallet_bridge_messages::ensure_weights_are_correct::>(); + // check that the maximal message dispatch weight is below hardcoded limit + pallet_bridge_messages::ensure_maximal_message_dispatch::>( + C::maximal_incoming_message_size(), + C::maximal_incoming_message_dispatch_weight(), + ); + // check that weights allow us to receive messages - let max_incoming_message_proof_size = bridged_chain_extra_storage_proof_size - .saturating_add(messages::target::maximal_incoming_message_size(C::max_extrinsic_size())); + let max_incoming_message_proof_size = + bridged_chain_extra_storage_proof_size.saturating_add(C::maximal_incoming_message_size()); pallet_bridge_messages::ensure_able_to_receive_message::>( C::max_extrinsic_size(), C::max_extrinsic_weight(), max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight(C::max_extrinsic_weight()), + C::maximal_incoming_message_dispatch_weight(), ); // check that weights allow us to receive delivery confirmations diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index 5679acd6006ccb8540f940f0f90363f902d643f7..b65bb6041d5610ce2bdfb63f923f3f24b21dcd7f 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -20,11 +20,10 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod extensions; -pub mod messages; + pub mod messages_api; pub mod messages_benchmarking; pub mod messages_call_ext; -pub mod messages_generation; pub mod messages_xcm_extension; pub mod parachains_benchmarking; diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs deleted file mode 100644 index 0fe9935dbdb6dfc776977ff8cfbad87d3eee9f6e..0000000000000000000000000000000000000000 --- a/bridges/bin/runtime-common/src/messages.rs +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that allow runtime to act as a source/target endpoint of message lanes. -//! -//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch -//! pallet is used to dispatch incoming messages. Message identified by a tuple -//! of to elements - message lane id and message nonce. 
- -pub use bp_runtime::{RangeInclusiveExt, UnderlyingChainOf, UnderlyingChainProvider}; - -use bp_header_chain::HeaderChain; -use bp_messages::{ - source_chain::TargetHeaderChain, - target_chain::{ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, - VerificationError, -}; -use bp_runtime::{Chain, RawStorageProof, Size, StorageProofChecker}; -use codec::{Decode, Encode}; -use frame_support::{traits::Get, weights::Weight}; -use hash_db::Hasher; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, vec::Vec}; - -/// Bidirectional message bridge. -pub trait MessageBridge { - /// Name of the paired messages pallet instance at the Bridged chain. - /// - /// Should be the name that is used in the `construct_runtime!()` macro. - const BRIDGED_MESSAGES_PALLET_NAME: &'static str; - - /// This chain in context of message bridge. - type ThisChain: ThisChainWithMessages; - /// Bridged chain in context of message bridge. - type BridgedChain: BridgedChainWithMessages; - /// Bridged header chain. - type BridgedHeaderChain: HeaderChain>; -} - -/// This chain that has `pallet-bridge-messages` module. -pub trait ThisChainWithMessages: UnderlyingChainProvider { - /// Call origin on the chain. - type RuntimeOrigin; -} - -/// Bridged chain that has `pallet-bridge-messages` module. -pub trait BridgedChainWithMessages: UnderlyingChainProvider {} - -/// This chain in context of message bridge. -pub type ThisChain = ::ThisChain; -/// Bridged chain in context of message bridge. -pub type BridgedChain = ::BridgedChain; -/// Hash used on the chain. -pub type HashOf = bp_runtime::HashOf<::Chain>; -/// Hasher used on the chain. -pub type HasherOf = bp_runtime::HasherOf>; -/// Account id used on the chain. -pub type AccountIdOf = bp_runtime::AccountIdOf>; -/// Type of balances that is used on the chain. -pub type BalanceOf = bp_runtime::BalanceOf>; - -/// Sub-module that is declaring types required for processing This -> Bridged chain messages. -pub mod source { - use super::*; - - /// Message payload for This -> Bridged chain messages. - pub type FromThisChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Maximal size of outbound message payload. - pub struct FromThisChainMaximalOutboundPayloadSize(PhantomData); - - impl Get for FromThisChainMaximalOutboundPayloadSize { - fn get() -> u32 { - maximal_message_size::() - } - } - - /// Messages delivery proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of inbound lane state; - /// - lane id. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesDeliveryProof { - /// Hash of the bridge header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// Storage trie proof generated for [`Self::bridged_header_hash`]. - pub storage_proof: RawStorageProof, - /// Lane id of which messages were delivered and the proof is for. - pub lane: LaneId, - } - - impl Size for FromBridgedChainMessagesDeliveryProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// 'Parsed' message delivery proof - inbound lane id and its state. - pub type ParsedMessagesDeliveryProofFromBridgedChain = - (LaneId, InboundLaneData>>); - - /// Return maximal message size of This -> Bridged chain message. 
- pub fn maximal_message_size() -> u32 { - super::target::maximal_incoming_message_size( - UnderlyingChainOf::>::max_extrinsic_size(), - ) - } - - /// `TargetHeaderChain` implementation that is using default types and perform default checks. - pub struct TargetHeaderChainAdapter(PhantomData); - - impl TargetHeaderChain>> - for TargetHeaderChainAdapter - { - type MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>>; - - fn verify_message(payload: &FromThisChainMessagePayload) -> Result<(), VerificationError> { - verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData>>), VerificationError> { - verify_messages_delivery_proof::(proof) - } - } - - /// Do basic Bridged-chain specific verification of This -> Bridged chain message. - /// - /// Ok result from this function means that the delivery transaction with this message - /// may be 'mined' by the target chain. - pub fn verify_chain_message( - payload: &FromThisChainMessagePayload, - ) -> Result<(), VerificationError> { - // IMPORTANT: any error that is returned here is fatal for the bridge, because - // this code is executed at the bridge hub and message sender actually lives - // at some sibling parachain. So we are failing **after** the message has been - // sent and we can't report it back to sender (unless error report mechanism is - // embedded into message and its dispatcher). - - // apart from maximal message size check (see below), we should also check the message - // dispatch weight here. But we assume that the bridged chain will just push the message - // to some queue (XCMP, UMP, DMP), so the weight is constant and fits the block. - - // The maximal size of extrinsic at Substrate-based chain depends on the - // `frame_system::Config::MaximumBlockLength` and - // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that - // the lane won't stuck because message is too large to fit into delivery transaction. - // - // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not - // the message itself. The proof is always larger than the message. But unless chain state - // is enormously large, it should be several dozens/hundreds of bytes. The delivery - // transaction also contains signatures and signed extensions. Because of this, we reserve - // 1/3 of the the maximal extrinsic size for this data. - if payload.len() > maximal_message_size::() as usize { - return Err(VerificationError::MessageTooLarge) - } - - Ok(()) - } - - /// Verify proof of This -> Bridged chain messages delivery. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_delivery_proof_from_parachain`. - pub fn verify_messages_delivery_proof( - proof: FromBridgedChainMessagesDeliveryProof>>, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = - proof; - let mut storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - // Messages delivery proof is just proof of single storage key read => any error - // is fatal. 
- let storage_inbound_lane_data_key = bp_messages::storage_keys::inbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &lane, - ); - let inbound_lane_data = storage - .read_and_decode_mandatory_value(storage_inbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::InboundLaneStorage)?; - - // check that the storage proof doesn't have any untouched trie nodes - storage.ensure_no_unused_nodes().map_err(VerificationError::StorageProof)?; - - Ok((lane, inbound_lane_data)) - } -} - -/// Sub-module that is declaring types required for processing Bridged -> This chain messages. -pub mod target { - use super::*; - - /// Decoded Bridged -> This message payload. - pub type FromBridgedChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Messages proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of messages and (optionally) outbound lane state; - /// - lane id; - /// - nonces (inclusive range) of messages which are included in this proof. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesProof { - /// Hash of the finalized bridged header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// A storage trie proof of messages being delivered. - pub storage_proof: RawStorageProof, - /// Messages in this proof are sent over this lane. - pub lane: LaneId, - /// Nonce of the first message being delivered. - pub nonces_start: MessageNonce, - /// Nonce of the last message being delivered. - pub nonces_end: MessageNonce, - } - - impl Size for FromBridgedChainMessagesProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// Return maximal dispatch weight of the message we're able to receive. - pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - maximal_extrinsic_weight / 2 - } - - /// Return maximal message size given maximal extrinsic size. - pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { - maximal_extrinsic_size / 3 * 2 - } - - /// `SourceHeaderChain` implementation that is using default types and perform default checks. - pub struct SourceHeaderChainAdapter(PhantomData); - - impl SourceHeaderChain for SourceHeaderChainAdapter { - type MessagesProof = FromBridgedChainMessagesProof>>; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError> { - verify_messages_proof::(proof, messages_count) - } - } - - /// Verify proof of Bridged -> This chain messages. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_proof_from_parachain`. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. 
- pub fn verify_messages_proof( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane, - nonces_start, - nonces_end, - } = proof; - let storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - let mut parser = StorageProofCheckerAdapter::<_, B> { storage, _dummy: Default::default() }; - let nonces_range = nonces_start..=nonces_end; - - // receiving proofs where end < begin is ok (if proof includes outbound lane state) - let messages_in_the_proof = nonces_range.checked_len().unwrap_or(0); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(VerificationError::MessagesCountMismatch) - } - - // Read messages first. All messages that are claimed to be in the proof must - // be in the proof. So any error in `read_value`, or even missing value is fatal. - // - // Mind that we allow proofs with no messages if outbound lane state is proved. - let mut messages = Vec::with_capacity(messages_in_the_proof as _); - for nonce in nonces_range { - let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = parser.read_and_decode_message_payload(&message_key)?; - messages.push(Message { key: message_key, payload: message_payload }); - } - - // Now let's check if proof contains outbound lane state proof. It is optional, so - // we simply ignore `read_value` errors and missing value. - let proved_lane_messages = ProvedLaneMessages { - lane_state: parser.read_and_decode_outbound_lane_data(&lane)?, - messages, - }; - - // Now we may actually check if the proof is empty or not. - if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(VerificationError::EmptyMessageProof) - } - - // check that the storage proof doesn't have any untouched trie nodes - parser - .storage - .ensure_no_unused_nodes() - .map_err(VerificationError::StorageProof)?; - - // We only support single lane messages in this generated_schema - let mut proved_messages = ProvedMessages::new(); - proved_messages.insert(lane, proved_lane_messages); - - Ok(proved_messages) - } - - struct StorageProofCheckerAdapter { - storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData, - } - - impl StorageProofCheckerAdapter { - fn read_and_decode_outbound_lane_data( - &mut self, - lane_id: &LaneId, - ) -> Result, VerificationError> { - let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - lane_id, - ); - - self.storage - .read_and_decode_opt_value(storage_outbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::OutboundLaneStorage) - } - - fn read_and_decode_message_payload( - &mut self, - message_key: &MessageKey, - ) -> Result { - let storage_message_key = bp_messages::storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ); - self.storage - .read_and_decode_mandatory_value(storage_message_key.0.as_ref()) - .map_err(VerificationError::MessageStorage) - } - } -} - -/// The `BridgeMessagesCall` used by a chain. 
-pub type BridgeMessagesCallOf = bp_messages::BridgeMessagesCall< - bp_runtime::AccountIdOf, - target::FromBridgedChainMessagesProof>, - source::FromBridgedChainMessagesDeliveryProof>, ->; - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_messages_storage_proof, - }, - mock::*, - }; - use bp_header_chain::{HeaderChainError, StoredHeaderDataBuilder}; - use bp_runtime::{HeaderId, StorageProofError}; - use codec::Encode; - use sp_core::H256; - use sp_runtime::traits::Header as _; - - #[test] - fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!(source::verify_chain_message::(&vec![ - 42; - BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - - 1 - ]) - .is_err()); - } - - #[test] - fn verify_chain_message_rejects_message_too_large_message() { - assert!(source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as usize + 1 - ],) - .is_err()); - } - - #[test] - fn verify_chain_message_accepts_maximal_message() { - assert_eq!( - source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as _ - ],), - Ok(()), - ); - } - - fn using_messages_proof( - nonces_end: MessageNonce, - outbound_lane_data: Option, - encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, - encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, - test: impl Fn(target::FromBridgedChainMessagesProof) -> R, - ) -> R { - let (state_root, storage_proof) = prepare_messages_storage_proof::( - TEST_LANE_ID, - 1..=nonces_end, - outbound_lane_data, - bp_runtime::StorageProofSize::Minimal(0), - vec![42], - encode_message, - encode_outbound_lane_data, - ); - - sp_io::TestExternalities::new(Default::default()).execute_with(move || { - let bridged_header = BridgedChainHeader::new( - 0, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - - pallet_bridge_grandpa::BestFinalized::::put(HeaderId( - 0, - bridged_header_hash, - )); - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - bridged_header.build(), - ); - test(target::FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: TEST_LANE_ID, - nonces_start: 1, - nonces_end, - }) - }) - } - - #[test] - fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 5) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 15) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_is_missing_from_the_chain() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::remove(bridged_header_hash); - target::verify_messages_proof::(proof, 10) - }), - Err(VerificationError::HeaderChain(HeaderChainError::UnknownHeader)), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_state_root_mismatches() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, 
|proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - BridgedChainHeader::new( - 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - .build(), - ); - target::verify_messages_proof::(proof, 10) - }), - Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::StorageRootMismatch - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - let node = proof.storage_proof.pop().unwrap(); - proof.storage_proof.push(node.clone()); - proof.storage_proof.push(node); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::DuplicateNodesInProof - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_unused_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.storage_proof.push(vec![42]); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::StorageProof(StorageProofError::UnusedNodesInTheProof)), - ); - } - - #[test] - fn message_proof_is_rejected_if_required_message_is_missing() { - matches!( - using_messages_proof( - 10, - None, - |n, m| if n != 5 { Some(m.encode()) } else { None }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10) - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueEmpty)), - ); - } - - #[test] - fn message_proof_is_rejected_if_message_decode_fails() { - matches!( - using_messages_proof( - 10, - None, - |n, m| { - let mut m = m.encode(); - if n == 5 { - m = vec![42] - } - Some(m) - }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueDecodeFailed(_))), - ); - } - - #[test] - fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { - matches!( - using_messages_proof( - 10, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - |d| { - let mut d = d.encode(); - d.truncate(1); - d - }, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::OutboundLaneStorage( - StorageProofError::StorageValueDecodeFailed(_) - )), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_is_empty() { - assert_eq!( - using_messages_proof(0, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 0) - },), - Err(VerificationError::EmptyMessageProof), - ); - } - - #[test] - fn non_empty_message_proof_without_messages_is_accepted() { - assert_eq!( - using_messages_proof( - 0, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 0), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: Vec::new(), - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn non_empty_message_proof_is_accepted() { - assert_eq!( - using_messages_proof( - 1, - Some(OutboundLaneData { - 
oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 1), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: vec![Message { - key: MessageKey { lane_id: TEST_LANE_ID, nonce: 1 }, - payload: vec![42], - }], - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn verify_messages_proof_does_not_panic_if_messages_count_mismatches() { - assert_eq!( - using_messages_proof(1, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.nonces_end = u64::MAX; - target::verify_messages_proof::(proof, u32::MAX) - },), - Err(VerificationError::MessagesCountMismatch), - ); - } -} diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 74494f7908045fac601b4c3f64a456ad12dacd6f..1880e65547fe6d5e0af71e6ec7c6e0e214f20866 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -19,23 +19,22 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - AccountIdOf, BridgedChain, HashOf, MessageBridge, ThisChain, - }, +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, MessagePayload, +}; +use bp_polkadot_core::parachains::ParaHash; +use bp_runtime::{AccountIdOf, Chain, HashOf, Parachain}; +use codec::Encode; +use frame_support::weights::Weight; +use pallet_bridge_messages::{ + benchmarking::{MessageDeliveryProofParams, MessageProofParams}, messages_generation::{ encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, prepare_messages_storage_proof, }, + BridgedChainOf, ThisChainOf, }; - -use bp_messages::MessagePayload; -use bp_polkadot_core::parachains::ParaHash; -use bp_runtime::{Chain, Parachain, StorageProofSize, UnderlyingChainOf}; -use codec::Encode; -use frame_support::weights::Weight; -use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams}; use sp_runtime::traits::{Header, Zero}; use sp_std::prelude::*; use xcm::latest::prelude::*; @@ -45,11 +44,7 @@ fn prepare_inbound_message( params: &MessageProofParams, successful_dispatch_message_generator: impl Fn(usize) -> MessagePayload, ) -> MessagePayload { - // we only care about **this** message size when message proof needs to be `Minimal` - let expected_size = match params.size { - StorageProofSize::Minimal(size) => size as usize, - _ => 0, - }; + let expected_size = params.proof_params.db_size.unwrap_or(0) as usize; // if we don't need a correct message, then we may just return some random blob if !params.is_successful_dispatch_expected { @@ -75,25 +70,32 @@ fn prepare_inbound_message( /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses GRANDPA finality. For parachains, please use the `prepare_message_proof_from_parachain` /// function. 
-pub fn prepare_message_proof_from_grandpa_chain( +pub fn prepare_message_proof_from_grandpa_chain( params: MessageProofParams, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>>, Weight) where - R: pallet_bridge_grandpa::Config>>, + R: pallet_bridge_grandpa::Config> + + pallet_bridge_messages::Config< + MI, + BridgedHeaderChain = pallet_bridge_grandpa::Pallet, + >, FI: 'static, - B: MessageBridge, + MI: 'static, { // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); + let (state_root, storage_proof) = + prepare_messages_storage_proof::, ThisChainOf>( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -118,30 +120,33 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_proof_from_grandpa_chain` function. -pub fn prepare_message_proof_from_parachain( +pub fn prepare_message_proof_from_parachain( params: MessageProofParams, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>>, Weight) where - R: pallet_bridge_parachains::Config, + R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, + MI: 'static, + BridgedChainOf: Chain + Parachain, { // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); + let (state_root, storage_proof) = + prepare_messages_storage_proof::, ThisChainOf>( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); + insert_header_to_parachains_pallet::>(state_root); ( FromBridgedChainMessagesProof { @@ -160,21 +165,24 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses GRANDPA finality. For parachains, please use the /// `prepare_message_delivery_proof_from_parachain` function. 
-pub fn prepare_message_delivery_proof_from_grandpa_chain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> +pub fn prepare_message_delivery_proof_from_grandpa_chain( + params: MessageDeliveryProofParams>>, +) -> FromBridgedChainMessagesDeliveryProof>> where - R: pallet_bridge_grandpa::Config>>, + R: pallet_bridge_grandpa::Config> + + pallet_bridge_messages::Config< + MI, + BridgedHeaderChain = pallet_bridge_grandpa::Pallet, + >, FI: 'static, - B: MessageBridge, + MI: 'static, { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< + BridgedChainOf, + ThisChainOf, + >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -191,26 +199,25 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_delivery_proof_from_grandpa_chain` function. -pub fn prepare_message_delivery_proof_from_parachain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> +pub fn prepare_message_delivery_proof_from_parachain( + params: MessageDeliveryProofParams>>, +) -> FromBridgedChainMessagesDeliveryProof>> where - R: pallet_bridge_parachains::Config, + R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, + MI: 'static, + BridgedChainOf: Chain + Parachain, { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< + BridgedChainOf, + ThisChainOf, + >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); + insert_header_to_parachains_pallet::>(state_root); FromBridgedChainMessagesDeliveryProof { bridged_header_hash: bridged_header_hash.into(), diff --git a/bridges/bin/runtime-common/src/messages_call_ext.rs b/bridges/bin/runtime-common/src/messages_call_ext.rs index fb07f7b6dd69110918af23b227708e226bede625..a9ee1969ae0ca462f36098f03b4454e1399af129 100644 --- a/bridges/bin/runtime-common/src/messages_call_ext.rs +++ b/bridges/bin/runtime-common/src/messages_call_ext.rs @@ -14,19 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Signed extension for the `pallet-bridge-messages` that is able to reject obsolete -//! (and some other invalid) transactions. +//! Helpers for easier manipulation of call processing with signed extensions. 
-use crate::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, +use bp_messages::{ + target_chain::MessageDispatch, ChainWithMessages, InboundLaneData, LaneId, MessageNonce, }; -use bp_messages::{target_chain::MessageDispatch, InboundLaneData, LaneId, MessageNonce}; -use bp_runtime::OwnedBridgeModule; -use frame_support::{ - dispatch::CallableCallFor, - traits::{Get, IsSubType}, -}; -use pallet_bridge_messages::{Config, Pallet}; +use bp_runtime::{AccountIdOf, OwnedBridgeModule}; +use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; +use pallet_bridge_messages::{BridgedChainOf, Config, Pallet}; use sp_runtime::{transaction_validity::TransactionValidity, RuntimeDebug}; use sp_std::ops::RangeInclusive; @@ -213,18 +208,8 @@ pub trait MessagesCallSubType, I: 'static>: } impl< - BridgedHeaderHash, - SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof, - >, - TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, Call: IsSubType, T>>, - T: frame_system::Config - + Config, + T: frame_system::Config + Config, I: 'static, > MessagesCallSubType for T::RuntimeCall { @@ -340,16 +325,17 @@ impl< /// Returns occupation state of unrewarded relayers vector. fn unrewarded_relayers_occupation, I: 'static>( - inbound_lane_data: &InboundLaneData, + inbound_lane_data: &InboundLaneData>>, ) -> UnrewardedRelayerOccupation { UnrewardedRelayerOccupation { - free_relayer_slots: T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + free_relayer_slots: T::BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX .saturating_sub(inbound_lane_data.relayers.len() as MessageNonce), free_message_slots: { let unconfirmed_messages = inbound_lane_data .last_delivered_nonce() .saturating_sub(inbound_lane_data.last_confirmed_nonce); - T::MaxUnconfirmedMessagesAtInboundLane::get().saturating_sub(unconfirmed_messages) + T::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX + .saturating_sub(unconfirmed_messages) }, } } @@ -358,22 +344,20 @@ fn unrewarded_relayers_occupation, I: 'static>( mod tests { use super::*; use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, messages_call_ext::MessagesCallSubType, - mock::{ - DummyMessageDispatch, MaxUnconfirmedMessagesAtInboundLane, - MaxUnrewardedRelayerEntriesAtInboundLane, TestRuntime, ThisChainRuntimeCall, - }, + mock::{BridgedUnderlyingChain, DummyMessageDispatch, TestRuntime, ThisChainRuntimeCall}, + }; + use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, DeliveredMessages, UnrewardedRelayer, + UnrewardedRelayersState, }; - use bp_messages::{DeliveredMessages, UnrewardedRelayer, UnrewardedRelayersState}; use sp_std::ops::RangeInclusive; fn fill_unrewarded_relayers() { let mut inbound_lane_state = pallet_bridge_messages::InboundLanes::::get(LaneId([0, 0, 0, 0])); - for n in 0..MaxUnrewardedRelayerEntriesAtInboundLane::get() { + for n in 0..BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX { inbound_lane_state.relayers.push_back(UnrewardedRelayer { relayer: Default::default(), messages: DeliveredMessages { begin: n + 1, end: n + 1 }, @@ -392,7 +376,7 @@ mod tests { relayer: Default::default(), messages: DeliveredMessages { begin: 1, - end: MaxUnconfirmedMessagesAtInboundLane::get(), + end: 
BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }); pallet_bridge_messages::InboundLanes::::insert( @@ -418,13 +402,13 @@ mod tests { messages_count: nonces_end.checked_sub(nonces_start).map(|x| x + 1).unwrap_or(0) as u32, dispatch_weight: frame_support::weights::Weight::zero(), - proof: FromBridgedChainMessagesProof { + proof: Box::new(FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: LaneId([0, 0, 0, 0]), nonces_start, nonces_end, - }, + }), }, ) .check_obsolete_call() @@ -508,8 +492,8 @@ mod tests { sp_io::TestExternalities::new(Default::default()).execute_with(|| { fill_unrewarded_messages(); assert!(validate_message_delivery( - MaxUnconfirmedMessagesAtInboundLane::get(), - MaxUnconfirmedMessagesAtInboundLane::get() - 1 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX - 1 )); }); } @@ -540,7 +524,7 @@ mod tests { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: FromBridgedChainMessagesDeliveryProof { bridged_header_hash: Default::default(), - storage_proof: Vec::new(), + storage_proof: Default::default(), lane: LaneId([0, 0, 0, 0]), }, relayers_state: UnrewardedRelayersState { @@ -608,7 +592,7 @@ mod tests { free_message_slots: if is_empty { 0 } else { - MaxUnconfirmedMessagesAtInboundLane::get() + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX }, }, }, diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index f49474667896049cfd6aff4bf4a4b0d9d6e73c95..2f248a7162a6cbdcc09d2980a922b1a065127e40 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -18,26 +18,16 @@ #![cfg(test)] -use crate::messages::{ - source::{ - FromThisChainMaximalOutboundPayloadSize, FromThisChainMessagePayload, - TargetHeaderChainAdapter, - }, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, -}; +use crate::messages_xcm_extension::XcmAsPlainPayload; -use bp_header_chain::{ChainWithGrandpa, HeaderChain}; +use bp_header_chain::ChainWithGrandpa; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - LaneId, MessageNonce, + ChainWithMessages, LaneId, MessageNonce, }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_relayers::PayRewardFromAccount; -use bp_runtime::{ - messages::MessageDispatchResult, Chain, ChainId, Parachain, UnderlyingChainProvider, -}; -use codec::{Decode, Encode}; +use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, Parachain}; use frame_support::{ derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, @@ -46,7 +36,7 @@ use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, - FixedPointNumber, Perquintill, + FixedPointNumber, Perquintill, StateVersion, }; /// Account identifier at `ThisChain`. @@ -61,8 +51,6 @@ pub type ThisChainHash = H256; pub type ThisChainHasher = BlakeTwo256; /// Runtime call at `ThisChain`. pub type ThisChainRuntimeCall = RuntimeCall; -/// Runtime call origin at `ThisChain`. -pub type ThisChainCallOrigin = RuntimeOrigin; /// Header of `ThisChain`. pub type ThisChainHeader = sp_runtime::generic::Header; /// Block of `ThisChain`. 
@@ -100,8 +88,6 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 0]); /// Bridged chain id used in tests. pub const TEST_BRIDGED_CHAIN_ID: ChainId = *b"brdg"; -/// Maximal extrinsic weight at the `BridgedChain`. -pub const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: usize = 2048; /// Maximal extrinsic size at the `BridgedChain`. pub const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; @@ -126,7 +112,6 @@ crate::generate_bridge_reject_obsolete_headers_and_messages! { parameter_types! { pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID]; - pub const BridgedChainId: ChainId = TEST_BRIDGED_CHAIN_ID; pub const BridgedParasPalletName: &'static str = "Paras"; pub const ExistentialDeposit: ThisChainBalance = 500; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; @@ -136,8 +121,6 @@ parameter_types! { pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); pub MaximumMultiplier: Multiplier = sp_runtime::traits::Bounded::max_value(); - pub const MaxUnrewardedRelayerEntriesAtInboundLane: MessageNonce = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: MessageNonce = 1_000; pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } @@ -203,17 +186,12 @@ impl pallet_bridge_messages::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_bridge_messages::weights::BridgeWeight; type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = FromThisChainMaximalOutboundPayloadSize; - type OutboundPayload = FromThisChainMessagePayload; + type OutboundPayload = XcmAsPlainPayload; - type InboundPayload = FromBridgedChainMessagePayload; - type InboundRelayer = BridgedChainAccountId; + type InboundPayload = Vec; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), @@ -221,9 +199,11 @@ impl pallet_bridge_messages::Config for TestRuntime { >; type OnMessagesDelivered = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = DummyMessageDispatch; - type BridgedChainId = BridgedChainId; + + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgeGrandpa; } impl pallet_bridge_relayers::Config for TestRuntime { @@ -262,55 +242,6 @@ impl MessageDispatch for DummyMessageDispatch { } } -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = ThisChain; - type BridgedChain = BridgedChain; - type BridgedHeaderChain = pallet_bridge_grandpa::GrandpaChainHeaders; -} - -/// Bridge that is deployed on `BridgedChain` and allows sending/receiving messages to/from -/// `ThisChain`. 
-#[derive(Debug, PartialEq, Eq)] -pub struct OnBridgedChainBridge; - -impl MessageBridge for OnBridgedChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = BridgedChain; - type BridgedChain = ThisChain; - type BridgedHeaderChain = ThisHeaderChain; -} - -/// Dummy implementation of `HeaderChain` for `ThisChain` at the `BridgedChain`. -pub struct ThisHeaderChain; - -impl HeaderChain for ThisHeaderChain { - fn finalized_header_state_root(_hash: HashOf) -> Option> { - unreachable!() - } -} - -/// Call origin at `BridgedChain`. -#[derive(Clone, Debug)] -pub struct BridgedChainOrigin; - -impl From - for Result, BridgedChainOrigin> -{ - fn from( - _origin: BridgedChainOrigin, - ) -> Result, BridgedChainOrigin> { - unreachable!() - } -} - /// Underlying chain of `ThisChain`. pub struct ThisUnderlyingChain; @@ -326,6 +257,8 @@ impl Chain for ThisUnderlyingChain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -335,29 +268,20 @@ impl Chain for ThisUnderlyingChain { } } -/// The chain where we are in tests. -pub struct ThisChain; - -impl UnderlyingChainProvider for ThisChain { - type Chain = ThisUnderlyingChain; -} +impl ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; -impl ThisChainWithMessages for ThisChain { - type RuntimeOrigin = ThisChainCallOrigin; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; } -impl BridgedChainWithMessages for ThisChain {} - /// Underlying chain of `BridgedChain`. pub struct BridgedUnderlyingChain; /// Some parachain under `BridgedChain` consensus. pub struct BridgedUnderlyingParachain; -/// Runtime call of the `BridgedChain`. -#[derive(Decode, Encode)] -pub struct BridgedChainCall; impl Chain for BridgedUnderlyingChain { - const ID: ChainId = *b"buch"; + const ID: ChainId = TEST_BRIDGED_CHAIN_ID; type BlockNumber = BridgedChainBlockNumber; type Hash = BridgedChainHash; @@ -368,6 +292,8 @@ impl Chain for BridgedUnderlyingChain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -384,6 +310,12 @@ impl ChainWithGrandpa for BridgedUnderlyingChain { const AVERAGE_HEADER_SIZE: u32 = 64; } +impl ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + impl Chain for BridgedUnderlyingParachain { const ID: ChainId = *b"bupc"; @@ -396,6 +328,8 @@ impl Chain for BridgedUnderlyingParachain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -409,19 +343,6 @@ impl Parachain for BridgedUnderlyingParachain { const MAX_HEADER_SIZE: u32 = 1_024; } -/// The other, bridged chain, used in tests. 
-pub struct BridgedChain; - -impl UnderlyingChainProvider for BridgedChain { - type Chain = BridgedUnderlyingChain; -} - -impl ThisChainWithMessages for BridgedChain { - type RuntimeOrigin = BridgedChainOrigin; -} - -impl BridgedChainWithMessages for BridgedChain {} - /// Run test within test externalities. pub fn run_test(test: impl FnOnce()) { sp_io::TestExternalities::new(Default::default()).execute_with(test) diff --git a/bridges/bin/runtime-common/src/parachains_benchmarking.rs b/bridges/bin/runtime-common/src/parachains_benchmarking.rs index b3050b9ac0f3ccec617399d3eb91647dcab7eb3d..bcbd779b44dea5fbef7781335cfa1d359ab8c1f1 100644 --- a/bridges/bin/runtime-common/src/parachains_benchmarking.rs +++ b/bridges/bin/runtime-common/src/parachains_benchmarking.rs @@ -18,14 +18,11 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{ - messages_benchmarking::insert_header_to_grandpa_pallet, - messages_generation::grow_trie_leaf_value, -}; +use crate::messages_benchmarking::insert_header_to_grandpa_pallet; use bp_parachains::parachain_head_storage_key_at_source; use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{record_all_trie_keys, StorageProofSize}; +use bp_runtime::{grow_storage_value, record_all_trie_keys, Chain, UnverifiedStorageProofParams}; use codec::Encode; use frame_support::traits::Get; use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; @@ -39,14 +36,14 @@ use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; pub fn prepare_parachain_heads_proof( parachains: &[ParaId], parachain_head_size: u32, - size: StorageProofSize, + proof_params: UnverifiedStorageProofParams, ) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) where R: pallet_bridge_parachains::Config + pallet_bridge_grandpa::Config, PI: 'static, >::BridgedChain: - bp_runtime::Chain, + Chain, { let parachain_head = ParaHead(vec![0u8; parachain_head_size as usize]); @@ -64,7 +61,7 @@ where let storage_key = parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain); let leaf_data = if i == 0 { - grow_trie_leaf_value(parachain_head.encode(), size) + grow_storage_value(parachain_head.encode(), &proof_params) } else { parachain_head.encode() }; diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index d9afe2c8bf76713104beead1ad4c36dc08dae1ed..b765fbc57bb0aec2eb4544a01dd84c057ced16ef 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -11,14 +11,14 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-support = { workspace = true } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 4b3ed052f1382d0c7f076ad5152c861f60d8bef1..ff89864fb2db4667d7929ef540ad4ec46cf2a36c 100644 --- 
a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -11,14 +11,14 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-support = { workspace = true } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index 4b900002a4d81abb9d7364f555a150a2af6c839c..5609398385f98b9d3731b196a057e939b1c099de 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -13,19 +13,19 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-polkadot-core = { workspace = true } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-system = { workspace = true } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml index ff6dd8849abe3897f1c3eb3cb1de8b7d89af5ca7..605643b0a4eb7e5d514edb1d4f1ad9db65a2c8ec 100644 --- a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } 
[features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs index ef3ef4ab7b7a9bc111218e3c53091ac232f34721..c990e8a12f367cafbd35b0693b323a6ec5fb5e96 100644 --- a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs @@ -29,7 +29,7 @@ use frame_support::{ dispatch::DispatchClass, sp_runtime::{MultiAddress, MultiSigner}, }; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubKusama parachain. #[derive(RuntimeDebug)] @@ -48,6 +48,8 @@ impl Chain for BridgeHubKusama { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index da8b8a82fa702eeab719335fa9968b78ee965163..97e36a19c748c0e5da990eda75bdbed9aa444e6f 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -14,16 +14,16 @@ workspace = true # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs index 9db71af928e5df01170cf4ab8bf5f20cd72f7610..7379b8863b1de5c1a1482db90077e958f0a33366 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs @@ -26,7 +26,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubPolkadot parachain. 
#[derive(RuntimeDebug)] @@ -45,6 +45,8 @@ impl Chain for BridgeHubPolkadot { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml index f7672df012f2fc2a21cfc987468427a3222317ea..5c918470322353c32556c3e5d381fb2ea713b2ab 100644 --- a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index d7097f01c5316a58851f400a86b98eda3d7e8bcc..73af997b9950ef640040e44cbba0b93b6a7a56a3 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -25,8 +25,10 @@ use bp_messages::*; use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; -use frame_support::dispatch::DispatchClass; -use sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug}; +use frame_support::{ + dispatch::DispatchClass, + sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug, StateVersion}, +}; /// BridgeHubRococo parachain. #[derive(RuntimeDebug)] @@ -45,6 +47,8 @@ impl Chain for BridgeHubRococo { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } @@ -103,10 +107,10 @@ frame_support::parameter_types! { pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 59_034_266; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 314_037_860; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. 
- /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 57_414_813; } diff --git a/bridges/chains/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml index ec74c4b947d693dba92d4da5051526e49349e0a5..0b429ab9a0bd9793a9129ed8483a608f71bfb44c 100644 --- a/bridges/chains/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -14,16 +14,16 @@ workspace = true # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 800f290d7bfa41cec4139e80a7dc9ea8962a6da5..17ff2c858a1d3eeae329cb972d95adc32952ede4 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubWestend parachain. 
#[derive(RuntimeDebug)] @@ -44,6 +44,8 @@ impl Chain for BridgeHubWestend { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml index 66061ff2793cbdd3419fa8894ab78e37486102ea..ec45c1eddce5d3b7be1f3a8ae9b83ca6332e7c28 100644 --- a/bridges/chains/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index fd7172c5869d468ff534e54f9ef6278cf86a88ed..dcd0b23abbbefa2dfba741d6934b5d5510c93017 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Kusama Chain pub struct Kusama; @@ -41,6 +41,8 @@ impl Chain for Kusama { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V0; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index 700247b7055a891bec2d4a40bfd126720a0d952c..ea5f4d2e77591bd8840e869f9b3567df5d56fd56 100644 --- a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -11,23 +11,23 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = 
"../../../substrate/frame/system", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index f3d300567f2b4f92cec272e0929a3c53d718c823..88980a9575016bd5c5e1428329454e8131a2075d 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -37,7 +37,9 @@ use frame_support::{ }; use frame_system::limits; use scale_info::TypeInfo; -use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill}; +use sp_runtime::{ + traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill, StateVersion, +}; // This chain reuses most of Polkadot primitives. pub use bp_polkadot_core::{ @@ -192,6 +194,8 @@ impl Chain for PolkadotBulletin { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml index c700935f3083b5f287277c7d9975be53352b2506..50f637af4251c8a7ed822861281a217ec12bdb28 100644 --- a/bridges/chains/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index a8cac0467d574e9355a8fe9ba2e7c2378019349d..f4b262d40735d7470a4d7e289f24bc1d4556d039 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -25,7 +25,7 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::{ decl_bridge_finality_runtime_apis, extensions::PrevalidateAttests, Chain, ChainId, }; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Polkadot Chain pub struct Polkadot; @@ -43,6 +43,8 @@ impl Chain for Polkadot { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V0; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml index 
5a5613bb376a5a4f75c773b3350993262149f973..49a1a397ee096532cfc0b5d3a42cf14469f8ed46 100644 --- a/bridges/chains/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index b290fe71c829d08130556a2b061c0d63f0787d4c..bfcafdf41ea2e629c9a58f2545016b2e776375b8 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Rococo Chain pub struct Rococo; @@ -41,6 +41,8 @@ impl Chain for Rococo { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml index 10b06d76507ef95bbff00f5560b705ecee1ec4ce..5e27bc647bfc5f07d5ab029307ef6dcf67121fb3 100644 --- a/bridges/chains/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index ef451f7de0a9640bc1a278e1c712bbb099193ceb..2a247e03e59d666d3c5dd54d74e3a4f852a60bd3 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Westend Chain pub struct Westend; @@ -41,6 +41,8 @@ impl Chain for Westend { 
type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index e36bbb615f23a20d4ef4a4f4ea8418e752d5b01f..cffc62d290828f032c5c57f27982e7f60f9b94ef 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -12,32 +12,32 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true } # Bridge Dependencies -bp-beefy = { path = "../../primitives/beefy", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-beefy = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy" } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2" } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr" } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range" } -rand = "0.8.5" -sp-io = { path = "../../../substrate/primitives/io" } -bp-test-utils = { path = "../../primitives/test-utils" } +sp-consensus-beefy = { workspace = true, default-features = true } +mmr-lib = { workspace = true } +pallet-beefy-mmr = { workspace = true, default-features = true } +pallet-mmr = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/beefy/src/mock.rs b/bridges/modules/beefy/src/mock.rs index 53efd57c29a0dfc870e43be4fec7bcdf817a3282..3b751ddf066c9562cd8fc0f054b1b103306479dd 100644 --- a/bridges/modules/beefy/src/mock.rs +++ b/bridges/modules/beefy/src/mock.rs @@ -29,6 +29,7 @@ use sp_core::{sr25519::Signature, Pair}; use sp_runtime::{ testing::{Header, H256}, traits::{BlakeTwo256, Hash}, + StateVersion, }; pub use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Pair as BeefyPair}; @@ -93,6 +94,8 @@ impl Chain for TestBridgedChain { type Nonce = u64; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 0ca6b67503511976ea9122f64e3c2e515e971177..6d1419ae5b030733ad9fb38a6a459ab7ce34f99f 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -13,32 +13,31 @@ 
workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } # Optional Benchmarking Dependencies -bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +bp-test-utils = { optional = true, workspace = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-runtime = { features = ["test-helpers"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "bp-runtime/std", "bp-test-utils/std", "codec/std", - "finality-grandpa/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -56,7 +54,6 @@ std = [ "sp-consensus-grandpa/std", "sp-runtime/std", "sp-std/std", - "sp-trie/std", ] runtime-benchmarks = [ "bp-test-utils", diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index 3b77f676870e1a28b8367f1b14d24c9ca83ece4a..c62951b74656b052d4858dec2af1393e41553029 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -1443,11 +1443,14 @@ mod tests { } #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { + fn verify_storage_proof_rejects_unknown_header() { run_test(|| { assert_noop!( - Pallet::::storage_proof_checker(Default::default(), vec![],) - .map(|_| ()), + Pallet::::verify_storage_proof( + Default::default(), + Default::default(), + ) + .map(|_| ()), bp_header_chain::HeaderChainError::UnknownHeader, ); }); @@ -1465,9 +1468,7 @@ mod tests { >::put(HeaderId(2, hash)); >::insert(hash, header.build()); - assert_ok!( - Pallet::::storage_proof_checker(hash, storage_proof).map(|_| ()) - ); + assert_ok!(Pallet::::verify_storage_proof(hash, 
storage_proof).map(|_| ())); }); } diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index 27df9d9c78f540d0d73f74c6a86ba19af30d4b6b..71af6182e057cca3d06b98ee6fe94283b93ab77d 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,7 +20,8 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::{Chain, ChainId}; use frame_support::{ - construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, + construct_runtime, derive_impl, parameter_types, sp_runtime::StateVersion, traits::Hooks, + weights::Weight, }; use sp_core::sr25519::Signature; @@ -78,6 +79,8 @@ impl Chain for TestBridgedChain { type Nonce = u64; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 71c86ccc0361708684d0a93166f858118dbf0d92..33f524030d264e4ed292f8f67273e838e15fc3a9 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -11,54 +11,69 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies - -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies - -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { optional = true, workspace = true } [dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-test-utils = { workspace = true } +pallet-balances = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } [features] default = ["std"] std = [ + "bp-header-chain/std", "bp-messages/std", "bp-runtime/std", + "bp-test-utils/std", "codec/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", - "num-traits/std", + "pallet-balances/std", + "pallet-bridge-grandpa/std", "scale-info/std", + "sp-core/std", + "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-trie/std", ] runtime-benchmarks = [ + "bp-runtime/test-helpers", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", 
"frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-bridge-grandpa/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", + "pallet-bridge-grandpa/try-runtime", "sp-runtime/try-runtime", ] +test-helpers = [ + "bp-runtime/test-helpers", + "sp-trie", +] diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md index c06b96b857dea1cdf7fdaed81e70d66aff116064..80fd92eb0e5a7d975ba45619838007a12f5f5553 100644 --- a/bridges/modules/messages/README.md +++ b/bridges/modules/messages/README.md @@ -104,17 +104,22 @@ the message. When a message is delivered to the target chain, the `MessagesDeliv `receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane identifier and inclusive range of delivered message nonces. -The pallet provides no means to get the result of message dispatch at the target chain. If that is required, it must be -done outside of the pallet. For example, XCM messages, when dispatched, have special instructions to send some data back -to the sender. Other dispatchers may use similar mechanism for that. -### How to plug-in Messages Module to Send Messages to the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with outbound messages. The -`pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the bridged chain as the target for our outbound -messages. It must be able to check that the bridged chain may accept our message - like that the message has size below -maximal possible transaction size of the chain and so on. And when the relayer sends us a confirmation transaction, this -implementation must be able to parse and verify the proof of messages delivery. Normally, you would reuse the same -(configurable) type on all chains that are sending messages to the same bridged chain. +The pallet provides no means to get the result of message dispatch at the target chain. If that is +required, it must be done outside of the pallet. For example, XCM messages, when dispatched, have +special instructions to send some data back to the sender. Other dispatchers may use similar +mechanism for that. + +### How to plug-in Messages Module to Send and Receive Messages from the Bridged Chain? + +The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with +inbound messages. The `pallet_bridge_messages::BridgedChain` defines basic primitives of the bridged +chain. The `pallet_bridge_messages::BridgedHeaderChain` defines the way we access the bridged chain +headers in our runtime. You may use `pallet_bridge_grandpa` if you're bridging with chain that uses +GRANDPA finality or `pallet_bridge_parachains::ParachainHeaders` if you're bridging with parachain. + +The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered +messages. Apart from actually dispatching the message, the implementation must return the correct +dispatch weight of the message before dispatch is called. The last type is the `pallet_bridge_messages::Config::DeliveryConfirmationPayments`. When confirmation transaction is received, we call the `pay_reward()` method, passing the range of delivered messages. @@ -129,18 +134,6 @@ You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` [`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). 
It implements all required traits and will simply reject all transactions, related to outbound messages. -### How to plug-in Messages Module to Receive Messages from the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with inbound messages. The -`pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the bridged chain as the source of our inbound -messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof -of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are -sending messages to the same bridged chain. - -The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered messages. Apart from -actually dispatching the message, the implementation must return the correct dispatch weight of the message before -dispatch is called. - ### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What shall I do? You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from the @@ -150,36 +143,42 @@ and will simply reject all transactions, related to inbound messages. ### What about other Constants in the Messages Module Configuration Trait? Two settings that are used to check messages in the `send_message()` function. The -`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes, that may be used to send -messages. All messages sent using other lanes are rejected. All messages that have size above -`pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected. - -To be able to reward the relayer for delivering messages, we store a map of message nonces range => identifier of the -relayer that has delivered this range at the target chain runtime storage. If a relayer delivers multiple consequent -ranges, they're merged into single entry. So there may be more than one entry for the same relayer. Eventually, this -whole map must be delivered back to the source chain to confirm delivery and pay rewards. So to make sure we are able to -craft this confirmation transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure -that the weight of processing this map is below a certain limit. Both size and processing weight mostly depend on the -number of entries. The number of entries is limited with the -`pallet_bridge_messages::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight also depends on -the total number of messages that are being confirmed, because every confirmed message needs to be read. So there's -another `pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. - -When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality -of headers (and it is the most obvious option for Substrate-based chains with finality notion), then choosing too small -values for these parameters may cause significant delays in message delivery. 
That's because there are too many actors
-involved in this scheme: 1) authorities that are finalizing headers of the target chain need to finalize header with
-non-empty map; 2) the headers relayer then needs to submit this header and its finality proof to the source chain; 3)
-the messages relayer must then send confirmation transaction (storage proof of this map) to the source chain; 4) when
-the confirmation transaction will be mined at some header, source chain authorities must finalize this header; 5) the
-headers relay then needs to submit this header and its finality proof to the target chain; 6) only now the messages
-relayer may submit new messages from the source to target chain and prune the entry from the map.
-
-Delivery transaction requires the relayer to provide both number of entries and total number of messages in the map.
-This means that the module never charges an extra cost for delivering a map - the relayer would need to pay exactly for
-the number of entries+messages it has delivered. So the best guess for values of these parameters would be the pair that
-would occupy `N` percent of the maximal transaction size and weight of the source chain. The `N` should be large enough
-to process large maps, at the same time keeping reserve for future source chain upgrades.
+`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes that
+may be used to send messages. All messages sent using other lanes are rejected. All messages that have
+size above `pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected.
+
+To be able to reward the relayer for delivering messages, we store a map of message nonces range =>
+identifier of the relayer that has delivered this range at the target chain runtime storage. If a
+relayer delivers multiple consecutive ranges, they're merged into a single entry. So there may be more
+than one entry for the same relayer. Eventually, this whole map must be delivered back to the source
+chain to confirm delivery and pay rewards. So to make sure we are able to craft this confirmation
+transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that
+the weight of processing this map is below a certain limit. Both size and processing weight mostly
+depend on the number of entries. The number of entries is limited by the
+`pallet_bridge_messages::Config::BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` parameter.
+Processing weight also depends on the total number of messages that are being confirmed, because every
+confirmed message needs to be read. So there's another
+`pallet_bridge_messages::Config::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX` parameter
+for that.
+
+When choosing values for these parameters, you must also keep in mind that if proof in your scheme
+is based on finality of headers (and it is the most obvious option for Substrate-based chains with
+finality notion), then choosing too small values for these parameters may cause significant delays
+in message delivery.
That's because there are too many actors involved in this scheme: 1) authorities +that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the +headers relayer then needs to submit this header and its finality proof to the source chain; 3) the +messages relayer must then send confirmation transaction (storage proof of this map) to the source +chain; 4) when the confirmation transaction will be mined at some header, source chain authorities +must finalize this header; 5) the headers relay then needs to submit this header and its finality +proof to the target chain; 6) only now the messages relayer may submit new messages from the source +to target chain and prune the entry from the map. + +Delivery transaction requires the relayer to provide both number of entries and total number of +messages in the map. This means that the module never charges an extra cost for delivering a map - +the relayer would need to pay exactly for the number of entries+messages it has delivered. So the +best guess for values of these parameters would be the pair that would occupy `N` percent of the +maximal transaction size and weight of the source chain. The `N` should be large enough to process +large maps, at the same time keeping reserve for future source chain upgrades. ## Non-Essential Functionality diff --git a/bridges/modules/messages/src/benchmarking.rs b/bridges/modules/messages/src/benchmarking.rs index 4f13c4409672b3e76d36fd7d3dd2fab5c7e2ec1b..d38aaf32dc94bd157de0d3e910b729a7970c1684 100644 --- a/bridges/modules/messages/src/benchmarking.rs +++ b/bridges/modules/messages/src/benchmarking.rs @@ -16,19 +16,22 @@ //! Messages pallet benchmarking. +#![cfg(feature = "runtime-benchmarks")] + use crate::{ inbound_lane::InboundLaneStorage, outbound_lane, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, - Call, OutboundLanes, RuntimeInboundLaneStorage, + BridgedChainOf, Call, OutboundLanes, RuntimeInboundLaneStorage, }; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, }; -use bp_runtime::StorageProofSize; +use bp_runtime::{AccountIdOf, HashOf, UnverifiedStorageProofParams}; use codec::Decode; -use frame_benchmarking::{account, benchmarks_instance_pallet}; +use frame_benchmarking::{account, v2::*}; use frame_support::weights::Weight; use frame_system::RawOrigin; use sp_runtime::{traits::TrailingZeroInput, BoundedVec}; @@ -54,7 +57,7 @@ pub struct MessageProofParams { /// return `true` from the `is_message_successfully_dispatched`. pub is_successful_dispatch_expected: bool, /// Proof size requirements. - pub size: StorageProofSize, + pub proof_params: UnverifiedStorageProofParams, } /// Benchmark-specific message delivery proof parameters. @@ -65,7 +68,7 @@ pub struct MessageDeliveryProofParams { /// The proof needs to include this inbound lane data. pub inbound_lane_data: InboundLaneData, /// Proof size requirements. - pub size: StorageProofSize, + pub proof_params: UnverifiedStorageProofParams, } /// Trait that must be implemented by runtime. @@ -80,8 +83,8 @@ pub trait Config: crate::Config { /// Return id of relayer account at the bridged chain. /// /// By default, zero account is returned. 
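As an aside to the documentation changes earlier in this diff: the two confirmation-transaction limits they discuss are now read from the bridged chain's `bp_messages::ChainWithMessages` implementation rather than from per-pallet constants. The sketch below is only a standalone restatement of the sizing rule described there (pick "the pair that would occupy `N` percent of the maximal transaction size"); the constant values, the `bytes_per_entry` figure and the `confirmation_fits` helper are invented for illustration and are not part of this PR.

```rust
// Illustrative sizing sketch only; none of these numbers come from the patch.
type MessageNonce = u64;

const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024;
const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096;

fn confirmation_fits(max_tx_size: u32, n_percent: u32, bytes_per_entry: u32) -> bool {
    // rough upper bound for the unrewarded-relayers map carried by the confirmation proof
    let map_size = MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as u32 * bytes_per_entry;
    map_size <= max_tx_size / 100 * n_percent
}

fn main() {
    // the entries limit should never exceed the messages limit
    assert!(MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX <= MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX);
    // e.g. 4 MiB extrinsics, keep the map within ~10% assuming ~80 bytes per entry
    assert!(confirmation_fits(4 * 1024 * 1024, 10, 80));
}
```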
- fn bridged_relayer_id() -> Self::InboundRelayer { - Self::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() + fn bridged_relayer_id() -> AccountIdOf> { + Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() } /// Create given account and give it enough balance for test purposes. Used to create @@ -94,11 +97,11 @@ pub trait Config: crate::Config { /// Prepare messages proof to receive by the module. fn prepare_message_proof( params: MessageProofParams, - ) -> (::MessagesProof, Weight); + ) -> (FromBridgedChainMessagesProof>>, Weight); /// Prepare messages delivery proof to receive by the module. fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, - ) -> >::MessagesDeliveryProof; + ) -> FromBridgedChainMessagesDeliveryProof>>; /// Returns true if message has been successfully dispatched or not. fn is_message_successfully_dispatched(_nonce: MessageNonce) -> bool { @@ -109,174 +112,227 @@ pub trait Config: crate::Config { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool; } -benchmarks_instance_pallet! { +fn send_regular_message, I: 'static>() { + let mut outbound_lane = outbound_lane::(T::bench_lane_id()); + outbound_lane.send_message(BoundedVec::try_from(vec![]).expect("We craft valid messages")); +} + +fn receive_messages, I: 'static>(nonce: MessageNonce) { + let mut inbound_lane_storage = + RuntimeInboundLaneStorage::::from_lane_id(T::bench_lane_id()); + inbound_lane_storage.set_data(InboundLaneData { + relayers: vec![UnrewardedRelayer { + relayer: T::bridged_relayer_id(), + messages: DeliveredMessages::new(nonce), + }] + .into_iter() + .collect(), + last_confirmed_nonce: 0, + }); +} + +struct ReceiveMessagesProofSetup, I: 'static> { + relayer_id_on_src: AccountIdOf>, + relayer_id_on_tgt: T::AccountId, + msgs_count: u32, + _phantom_data: sp_std::marker::PhantomData, +} + +impl, I: 'static> ReceiveMessagesProofSetup { + const LATEST_RECEIVED_NONCE: MessageNonce = 20; + + fn new(msgs_count: u32) -> Self { + let setup = Self { + relayer_id_on_src: T::bridged_relayer_id(), + relayer_id_on_tgt: account("relayer", 0, SEED), + msgs_count, + _phantom_data: Default::default(), + }; + T::endow_account(&setup.relayer_id_on_tgt); + // mark messages 1..=latest_recvd_nonce as delivered + receive_messages::(Self::LATEST_RECEIVED_NONCE); + + setup + } + + fn relayer_id_on_src(&self) -> AccountIdOf> { + self.relayer_id_on_src.clone() + } + + fn relayer_id_on_tgt(&self) -> T::AccountId { + self.relayer_id_on_tgt.clone() + } + + fn last_nonce(&self) -> MessageNonce { + Self::LATEST_RECEIVED_NONCE + self.msgs_count as u64 + } + + fn nonces(&self) -> RangeInclusive { + (Self::LATEST_RECEIVED_NONCE + 1)..=self.last_nonce() + } + + fn check_last_nonce(&self) { + assert_eq!( + crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), + self.last_nonce(), + ); + } +} + +#[instance_benchmarks] +mod benchmarks { + use super::*; + // // Benchmarks that are used directly by the runtime calls weight formulae. 
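The new `ReceiveMessagesProofSetup` helper introduced above centralises the nonce bookkeeping that the old benchmarks repeated inline (messages `1..=20` are pre-delivered, the measured call then delivers `msgs_count` messages starting at nonce 21). A standalone restatement of that arithmetic, detached from the pallet generics for illustration:

```rust
use std::ops::RangeInclusive;

// Mirrors `ReceiveMessagesProofSetup::LATEST_RECEIVED_NONCE` from the hunk above.
const LATEST_RECEIVED_NONCE: u64 = 20;

fn last_nonce(msgs_count: u64) -> u64 {
    LATEST_RECEIVED_NONCE + msgs_count
}

fn nonces(msgs_count: u64) -> RangeInclusive<u64> {
    // the benchmark delivers exactly `msgs_count` fresh messages
    (LATEST_RECEIVED_NONCE + 1)..=last_nonce(msgs_count)
}

fn main() {
    assert_eq!(nonces(1), 21..=21);
    assert_eq!(nonces(3), 21..=23);
}
```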
// - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + fn max_msgs, I: 'static>() -> u32 { + T::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX as u32 - + ReceiveMessagesProofSetup::::LATEST_RECEIVED_NONCE as u32 + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // This is base benchmark for all other message delivery benchmarks. - receive_single_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_message_proof() { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: + // Benchmark `receive_messages_proof` extrinsic with `n` minimal-weight messages and following + // conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // The weight of single message delivery could be approximated as - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
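The removed comment above describes how per-message weight used to be derived (the difference between the two-message and one-message benchmarks); with the new `receive_n_messages_proof(n: Linear<..>)` benchmark it instead falls out as the per-`n` slope of the fitted weight. A tiny, purely illustrative restatement of both derivations, with invented numbers:

```rust
// Old scheme: per-message cost = weight(two messages) - weight(one message).
fn per_message_ref_time_old(one_msg: u64, two_msgs: u64) -> u64 {
    two_msgs.saturating_sub(one_msg)
}

// New scheme: per-message cost = slope of an (idealised) linear fit over `n`.
fn per_message_ref_time_new(weight_at_zero: u64, weight_at_n: u64, n: u64) -> u64 {
    weight_at_n.saturating_sub(weight_at_zero) / n
}

fn main() {
    assert_eq!(per_message_ref_time_old(1_000_000, 1_450_000), 450_000);
    assert_eq!(per_message_ref_time_new(1_000_000, 5_500_000, 10), 450_000);
}
```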
- receive_two_messages_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_n_messages_proof(n: Linear<1, { max_msgs::() }>) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(n); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=22, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 22, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: // * proof includes outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is successfully dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. // // The weight of outbound lane state delivery would be - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. - receive_single_message_proof_with_outbound_lane_state { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + // `weight(receive_single_message_proof_with_outbound_lane_state) - + // weight(receive_single_message_proof)`. This won't be super-accurate if message has non-zero + // dispatch weight, but estimation should be close enough to real weight. 
+ #[benchmark] + fn receive_single_message_proof_with_outbound_lane_state() { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, + oldest_unpruned_nonce: setup.last_nonce(), + latest_received_nonce: ReceiveMessagesProofSetup::::LATEST_RECEIVED_NONCE, + latest_generated_nonce: setup.last_nonce(), }), is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - let lane_state = crate::InboundLanes::::get(&T::bench_lane_id()); - assert_eq!(lane_state.last_delivered_nonce(), 21); - assert_eq!(lane_state.last_confirmed_nonce, 20); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 1KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. - receive_single_message_proof_1_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 16KB; + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: + // * the proof has large leaf with total size ranging between 1KB and 16KB; // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // Size of proof grows because it contains extra trie nodes in it. 
- // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. - receive_single_message_proof_16_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_n_bytes_message_proof( + /// Proof size in KB + n: Linear<1, { 16 * 1024 }>, + ) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(16 * 1024), + proof_params: UnverifiedStorageProofParams::from_db_size(n), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: @@ -284,7 +340,8 @@ benchmarks_instance_pallet! { // * relayer account does not exist (in practice it needs to exist in production environment). // // This is base benchmark for all other confirmations delivery benchmarks. - receive_delivery_proof_for_single_message { + #[benchmark] + fn receive_delivery_proof_for_single_message() { let relayer_id: T::AccountId = account("relayer", 0, SEED); // send message that we're going to confirm @@ -302,13 +359,21 @@ benchmarks_instance_pallet! { relayers: vec![UnrewardedRelayer { relayer: relayer_id.clone(), messages: DeliveredMessages::new(1), - }].into_iter().collect(), + }] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 1); assert!(T::is_relayer_rewarded(&relayer_id)); } @@ -320,7 +385,8 @@ benchmarks_instance_pallet! { // Additional weight for paying single-message reward to the same relayer could be computed // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) // - weight(receive_delivery_proof_for_single_message)`. - receive_delivery_proof_for_two_messages_by_single_relayer { + #[benchmark] + fn receive_delivery_proof_for_two_messages_by_single_relayer() { let relayer_id: T::AccountId = account("relayer", 0, SEED); // send message that we're going to confirm @@ -341,13 +407,21 @@ benchmarks_instance_pallet! 
{ relayers: vec![UnrewardedRelayer { relayer: relayer_id.clone(), messages: delivered_messages, - }].into_iter().collect(), + }] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); assert!(T::is_relayer_rewarded(&relayer_id)); } @@ -359,7 +433,8 @@ benchmarks_instance_pallet! { // Additional weight for paying reward to the next relayer could be computed // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. - receive_delivery_proof_for_two_messages_by_two_relayers { + #[benchmark] + fn receive_delivery_proof_for_two_messages_by_two_relayers() { let relayer1_id: T::AccountId = account("relayer1", 1, SEED); let relayer2_id: T::AccountId = account("relayer2", 2, SEED); @@ -385,13 +460,21 @@ benchmarks_instance_pallet! { relayer: relayer2_id.clone(), messages: DeliveredMessages::new(2), }, - ].into_iter().collect(), + ] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer1_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); assert!(T::is_relayer_rewarded(&relayer1_id)); assert!(T::is_relayer_rewarded(&relayer2_id)); @@ -411,51 +494,38 @@ benchmarks_instance_pallet! { // * inbound lane already has state, so it needs to be read and decoded; // * message is **SUCCESSFULLY** dispatched; // * message requires all heavy checks done by dispatcher. - receive_single_message_proof_with_dispatch { - // maybe dispatch weight relies on the message size too? - let i in EXPECTED_DEFAULT_MESSAGE_LENGTH .. 
EXPECTED_DEFAULT_MESSAGE_LENGTH * 16; - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_n_bytes_message_proof_with_dispatch( + /// Proof size in KB + n: Linear<1, { 16 * 1024 }>, + ) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: true, - size: StorageProofSize::Minimal(i), + proof_params: UnverifiedStorageProofParams::from_db_size(n), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_successfully_dispatched(21)); - } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, + ); -fn send_regular_message, I: 'static>() { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(BoundedVec::try_from(vec![]).expect("We craft valid messages")); -} + // verification code + setup.check_last_nonce(); + assert!(T::is_message_successfully_dispatched(setup.last_nonce())); + } -fn receive_messages, I: 'static>(nonce: MessageNonce) { - let mut inbound_lane_storage = - RuntimeInboundLaneStorage::::from_lane_id(T::bench_lane_id()); - inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: T::bridged_relayer_id(), - messages: DeliveredMessages::new(nonce), - }] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }); + impl_benchmark_test_suite!( + Pallet, + crate::tests::mock::new_test_ext(), + crate::tests::mock::TestRuntime + ); } diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index da1698e6e0370f9f84ca8dd53bc1ebc99f696017..7ef4599a93c4823a9be7fa918b35c309cea6611a 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -16,15 +16,15 @@ //! Everything about incoming messages receival. -use crate::Config; +use crate::{BridgedChainOf, Config}; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceptionResult, UnrewardedRelayer, + ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, + OutboundLaneData, ReceptionResult, UnrewardedRelayer, }; +use bp_runtime::AccountIdOf; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::Get; use scale_info::{Type, TypeInfo}; use sp_runtime::RuntimeDebug; use sp_std::prelude::PartialEq; @@ -55,10 +55,12 @@ pub trait InboundLaneStorage { /// /// The encoding of this type matches encoding of the corresponding `MessageData`. 
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct StoredInboundLaneData, I: 'static>(pub InboundLaneData); +pub struct StoredInboundLaneData, I: 'static>( + pub InboundLaneData>>, +); impl, I: 'static> sp_std::ops::Deref for StoredInboundLaneData { - type Target = InboundLaneData; + type Target = InboundLaneData>>; fn deref(&self) -> &Self::Target { &self.0 @@ -78,7 +80,7 @@ impl, I: 'static> Default for StoredInboundLaneData { } impl, I: 'static> From> - for InboundLaneData + for InboundLaneData>> { fn from(data: StoredInboundLaneData) -> Self { data.0 @@ -86,7 +88,7 @@ impl, I: 'static> From> } impl, I: 'static> EncodeLike> - for InboundLaneData + for InboundLaneData>> { } @@ -94,14 +96,14 @@ impl, I: 'static> TypeInfo for StoredInboundLaneData { type Identity = Self; fn type_info() -> Type { - InboundLaneData::::type_info() + InboundLaneData::>>::type_info() } } impl, I: 'static> MaxEncodedLen for StoredInboundLaneData { fn max_encoded_len() -> usize { - InboundLaneData::::encoded_size_hint( - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize, + InboundLaneData::>>::encoded_size_hint( + BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize, ) .unwrap_or(usize::MAX) } @@ -216,10 +218,10 @@ mod tests { use super::*; use crate::{ inbound_lane, - mock::{ + tests::mock::{ dispatch_result, inbound_message_data, inbound_unrewarded_relayers_state, run_test, - unrewarded_relayer, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, - TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, + unrewarded_relayer, BridgedChain, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, + TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, }, RuntimeInboundLaneStorage, }; @@ -372,8 +374,7 @@ mod tests { fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = - ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); + let max_nonce = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; for current_nonce in 1..max_nonce + 1 { assert_eq!( lane.receive_message::( @@ -409,7 +410,7 @@ mod tests { fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); + let max_nonce = BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; for current_nonce in 1..=max_nonce { assert_eq!( lane.receive_message::( diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index e31a4542056cb30466f236d0dc9957c053a03f66..bf105b14040185f61e627a35f4f53b3a66b48c19 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -41,8 +41,8 @@ pub use outbound_lane::StoredMessagePayload; pub use weights::WeightInfo; pub use weights_ext::{ ensure_able_to_receive_confirmation, ensure_able_to_receive_message, - ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, - EXTRA_STORAGE_PROOF_SIZE, + ensure_maximal_message_dispatch, ensure_weights_are_correct, WeightInfoExt, + EXPECTED_DEFAULT_MESSAGE_LENGTH, EXTRA_STORAGE_PROOF_SIZE, }; use crate::{ @@ -50,20 +50,23 @@ use crate::{ outbound_lane::{OutboundLane, OutboundLaneStorage, ReceptionConfirmationError}, }; +use bp_header_chain::HeaderChain; use bp_messages::{ source_chain::{ - DeliveryConfirmationPayments, OnMessagesDelivered, SendMessageArtifacts, TargetHeaderChain, + DeliveryConfirmationPayments, 
FromBridgedChainMessagesDeliveryProof, OnMessagesDelivered, + SendMessageArtifacts, }, target_chain::{ - DeliveryPayments, DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, - SourceHeaderChain, + DeliveryPayments, DispatchMessage, FromBridgedChainMessagesProof, MessageDispatch, + ProvedLaneMessages, ProvedMessages, }, - DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, MessageKey, MessageNonce, - MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, - UnrewardedRelayersState, VerificationError, + ChainWithMessages, DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, + MessageKey, MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, + OutboundMessageDetails, UnrewardedRelayersState, VerificationError, }; use bp_runtime::{ - BasicOperatingMode, ChainId, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, Size, + AccountIdOf, BasicOperatingMode, HashOf, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, + Size, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{dispatch::PostDispatchInfo, ensure, fail, traits::Get, DefaultNoBound}; @@ -72,6 +75,8 @@ use sp_std::{marker::PhantomData, prelude::*}; mod inbound_lane; mod outbound_lane; +mod proofs; +mod tests; mod weights_ext; pub mod weights; @@ -79,10 +84,9 @@ pub mod weights; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -#[cfg(test)] -mod mock; - pub use pallet::*; +#[cfg(feature = "test-helpers")] +pub use tests::*; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "runtime::bridge-messages"; @@ -105,76 +109,39 @@ pub mod pallet { /// Benchmarks results from runtime we're plugged into. type WeightInfo: WeightInfoExt; - /// Gets the chain id value from the instance. - #[pallet::constant] - type BridgedChainId: Get; + /// This chain type. + type ThisChain: ChainWithMessages; + /// Bridged chain type. + type BridgedChain: ChainWithMessages; + /// Bridged chain headers provider. + type BridgedHeaderChain: HeaderChain; /// Get all active outbound lanes that the message pallet is serving. type ActiveOutboundLanes: Get<&'static [LaneId]>; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to - /// the source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. - type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than - /// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries - /// will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. 
- /// Transaction that is declaring more messages than this value, will be rejected. Even if - /// these messages are from different lanes. - type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Maximal encoded size of the outbound payload. - #[pallet::constant] - type MaximalOutboundPayloadSize: Get; + /// Payload type of outbound messages. This payload is dispatched on the bridged chain. type OutboundPayload: Parameter + Size; - /// Payload type of inbound messages. This payload is dispatched on this chain. type InboundPayload: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the - /// bridged chain. - type InboundRelayer: Parameter + MaxEncodedLen; - /// Delivery payments. - type DeliveryPayments: DeliveryPayments; - - // Types that are used by outbound_lane (on source chain). - /// Target header chain. - type TargetHeaderChain: TargetHeaderChain; - /// Delivery confirmation payments. + /// Handler for relayer payments that happen during message delivery transaction. + type DeliveryPayments: DeliveryPayments; + /// Handler for relayer payments that happen during message delivery confirmation + /// transaction. type DeliveryConfirmationPayments: DeliveryConfirmationPayments; /// Delivery confirmation callback. type OnMessagesDelivered: OnMessagesDelivered; - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. + /// Message dispatch handler. type MessageDispatch: MessageDispatch; } - /// Shortcut to messages proof type for Config. - pub type MessagesProofOf = - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof; - /// Shortcut to messages delivery proof type for Config. - pub type MessagesDeliveryProofOf = - <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - >>::MessagesDeliveryProof; + /// Shortcut to this chain type for Config. + pub type ThisChainOf = >::ThisChain; + /// Shortcut to bridged chain type for Config. + pub type BridgedChainOf = >::BridgedChain; + /// Shortcut to bridged header chain type for Config. + pub type BridgedHeaderChainOf = >::BridgedHeaderChain; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -265,11 +232,11 @@ pub mod pallet { /// The call may succeed, but some messages may not be delivered e.g. if they are not fit /// into the unrewarded relayers vector. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))] + #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(&**proof, *messages_count, *dispatch_weight))] pub fn receive_messages_proof( origin: OriginFor, - relayer_id_at_bridged_chain: T::InboundRelayer, - proof: MessagesProofOf, + relayer_id_at_bridged_chain: AccountIdOf>, + proof: Box>>>, messages_count: u32, dispatch_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -278,7 +245,8 @@ pub mod pallet { // reject transactions that are declaring too many messages ensure!( - MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), + MessageNonce::from(messages_count) <= + BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, Error::::TooManyMessagesInTheProof ); @@ -296,22 +264,19 @@ pub mod pallet { // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible // to get pre-computed value (and it has been already computed by the executive). 
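Purely as a schematic illustration of the `Config` rework in the hunk above: a sketch of what a runtime-side wiring might look like after this change. It will not compile on its own, every concrete name (`Runtime`, `WithBridgedMessagesInstance`, `ThisChainDescription`, `BridgedChainDescription`, `BridgeGrandpaInstance`, `MessageDispatcher`) is a placeholder, and items the hunk does not touch (such as `RuntimeEvent`) are omitted.

```rust
// Hypothetical wiring sketch, not taken from this PR.
impl pallet_bridge_messages::Config<WithBridgedMessagesInstance> for Runtime {
    type WeightInfo = pallet_bridge_messages::weights::BridgeWeight<Runtime>;

    // The old `BridgedChainId`, `MaxUnrewardedRelayerEntriesAtInboundLane`,
    // `MaxUnconfirmedMessagesAtInboundLane` and `MaximalOutboundPayloadSize`
    // items are gone; their values now come from the chain descriptions below.
    type ThisChain = ThisChainDescription;       // implements bp_messages::ChainWithMessages
    type BridgedChain = BridgedChainDescription; // implements bp_messages::ChainWithMessages
    // Provider of finalized bridged-chain headers, e.g. a GRANDPA pallet instance.
    type BridgedHeaderChain = pallet_bridge_grandpa::Pallet<Runtime, BridgeGrandpaInstance>;

    type ActiveOutboundLanes = ActiveOutboundLanes;
    type OutboundPayload = OutboundPayload;
    type InboundPayload = InboundPayload;
    type DeliveryPayments = ();
    type DeliveryConfirmationPayments = ();
    type OnMessagesDelivered = ();
    type MessageDispatch = MessageDispatcher;
}
```

The effect of the rework, as far as this diff shows, is that chain-level limits and identifiers are described once per chain via `ChainWithMessages` instead of being duplicated as pallet constants in every deployment.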
let declared_weight = T::WeightInfo::receive_messages_proof_weight( - &proof, + &*proof, messages_count, dispatch_weight, ); let mut actual_weight = declared_weight; // verify messages proof && convert proof into messages - let messages = verify_and_decode_messages_proof::< - T::SourceHeaderChain, - T::InboundPayload, - >(proof, messages_count) - .map_err(|err| { - log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,); + let messages = verify_and_decode_messages_proof::(*proof, messages_count) + .map_err(|err| { + log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,); - Error::::InvalidMessagesProof - })?; + Error::::InvalidMessagesProof + })?; // dispatch messages and (optionally) update lane(s) state(s) let mut total_messages = 0; @@ -424,14 +389,14 @@ pub mod pallet { ))] pub fn receive_messages_delivery_proof( origin: OriginFor, - proof: MessagesDeliveryProofOf, + proof: FromBridgedChainMessagesDeliveryProof>>, mut relayers_state: UnrewardedRelayersState, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; let proof_size = proof.size(); let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof) + let (lane_id, lane_data) = proofs::verify_messages_delivery_proof::(proof) .map_err(|err| { log::trace!( target: LOG_TARGET, @@ -542,8 +507,6 @@ pub mod pallet { InactiveOutboundLane, /// The inbound message dispatcher is inactive. MessageDispatchInactive, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier(VerificationError), /// Message has been treated as invalid by the pallet logic. MessageRejectedByPallet(VerificationError), /// Submitter has failed to pay fee for delivering and dispatching messages. @@ -674,7 +637,9 @@ pub mod pallet { } /// Return inbound lane data. - pub fn inbound_lane_data(lane: LaneId) -> InboundLaneData { + pub fn inbound_lane_data( + lane: LaneId, + ) -> InboundLaneData>> { InboundLanes::::get(lane).0 } } @@ -714,18 +679,6 @@ where // let's check if outbound lane is active ensure!(T::ActiveOutboundLanes::get().contains(&lane), Error::::InactiveOutboundLane); - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(message).map_err(|err| { - log::trace!( - target: LOG_TARGET, - "Message to lane {:?} is rejected by target chain: {:?}", - lane, - err, - ); - - Error::::MessageRejectedByChainVerifier(err) - })?; - Ok(SendMessageArgs { lane_id: lane, payload: StoredMessagePayload::::try_from(message.encode()).map_err(|_| { @@ -785,7 +738,7 @@ fn outbound_lane, I: 'static>( /// Runtime inbound lane storage. struct RuntimeInboundLaneStorage, I: 'static = ()> { lane_id: LaneId, - cached_data: Option>, + cached_data: Option>>>, _phantom: PhantomData, } @@ -802,39 +755,39 @@ impl, I: 'static> RuntimeInboundLaneStorage { /// maximal configured. /// /// Maximal inbound lane state set size is configured by the - /// `MaxUnrewardedRelayerEntriesAtInboundLane` constant from the pallet configuration. The PoV + /// `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` constant from the pallet configuration. The PoV /// of the call includes the maximal size of inbound lane state. If the actual size is smaller, /// we may subtract extra bytes from this component. 
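To make the post-dispatch PoV refund described in the doc comment above concrete, here is a standalone restatement of the arithmetic with invented numbers; the real `extra_proof_size_bytes` below uses `StoredInboundLaneData::max_encoded_len()` and `InboundLaneData::encoded_size_hint` for the two operands.

```rust
// The benchmarked weight charges for the maximal encoded inbound-lane state;
// whatever the actual state is smaller by gets handed back after dispatch.
fn extra_proof_size_bytes(max_encoded_len: usize, actual_encoded_len: usize) -> u64 {
    max_encoded_len.saturating_sub(actual_encoded_len) as u64
}

fn main() {
    // e.g. state sized for the maximal relayer-entry count, but only a few entries present
    assert_eq!(extra_proof_size_bytes(16_384, 1_200), 15_184);
}
```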
pub fn extra_proof_size_bytes(&mut self) -> u64 { let max_encoded_len = StoredInboundLaneData::::max_encoded_len(); let relayers_count = self.get_or_init_data().relayers.len(); let actual_encoded_len = - InboundLaneData::::encoded_size_hint(relayers_count) + InboundLaneData::>>::encoded_size_hint(relayers_count) .unwrap_or(usize::MAX); max_encoded_len.saturating_sub(actual_encoded_len) as _ } } impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { - type Relayer = T::InboundRelayer; + type Relayer = AccountIdOf>; fn id(&self) -> LaneId { self.lane_id } fn max_unrewarded_relayer_entries(&self) -> MessageNonce { - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX } fn max_unconfirmed_messages(&self) -> MessageNonce { - T::MaxUnconfirmedMessagesAtInboundLane::get() + BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX } - fn get_or_init_data(&mut self) -> InboundLaneData { + fn get_or_init_data(&mut self) -> InboundLaneData>> { match self.cached_data { Some(ref data) => data.clone(), None => { - let data: InboundLaneData = + let data: InboundLaneData>> = InboundLanes::::get(self.lane_id).into(); self.cached_data = Some(data.clone()); data @@ -842,7 +795,7 @@ impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage< } } - fn set_data(&mut self, data: InboundLaneData) { + fn set_data(&mut self, data: InboundLaneData>>) { self.cached_data = Some(data.clone()); InboundLanes::::insert(self.lane_id, StoredInboundLaneData::(data)) } @@ -887,14 +840,14 @@ impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorag } /// Verify messages proof and return proved messages with decoded payload. -fn verify_and_decode_messages_proof( - proof: Chain::MessagesProof, +fn verify_and_decode_messages_proof, I: 'static>( + proof: FromBridgedChainMessagesProof>>, messages_count: u32, -) -> Result>, VerificationError> { - // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check - // guarantees that the `message_count` is sane and Vec may be allocated. +) -> Result>, VerificationError> { + // `receive_messages_proof` weight formula and `MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX` + // check guarantees that the `message_count` is sane and Vec may be allocated. 
// (tx with too many messages will either be rejected from the pool, or will fail earlier) - Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { + proofs::verify_messages_proof::(proof, messages_count).map(|messages_by_lane| { messages_by_lane .into_iter() .map(|(lane, lane_data)| { @@ -909,1209 +862,3 @@ fn verify_and_decode_messages_proof::set_block_number(1); - System::::reset_events(); - } - - fn send_regular_message(lane_id: LaneId) { - get_ready_for_events(); - - let outbound_lane = outbound_lane::(lane_id); - let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); - let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) - .expect("validate_message has failed"); - let artifacts = Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); - - // check event with assigned nonce - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessageAccepted { - lane_id, - nonce: message_nonce - }), - topics: vec![], - }], - ); - } - - fn receive_messages_delivery_proof() { - System::::set_block_number(1); - System::::reset_events(); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessagesDelivered { - lane_id: TEST_LANE_ID, - messages: DeliveredMessages::new(1), - }), - topics: vec![], - }], - ); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put(MessagesOperatingMode::Basic( - BasicOperatingMode::Halted, - )); - - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - }); - } - - #[test] - fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put( - MessagesOperatingMode::RejectingOutboundMessages, - ); - - assert_noop!( - 
Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ),); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - }); - } - - #[test] - fn send_message_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - }); - } - - #[test] - fn send_message_rejects_too_large_message() { - run_test(|| { - let mut message_payload = message_payload(1, 0); - // the payload isn't simply extra, so it'll definitely overflow - // `MAX_OUTBOUND_PAYLOAD_SIZE` if we add `MAX_OUTBOUND_PAYLOAD_SIZE` bytes to extra - message_payload - .extra - .extend_from_slice(&[0u8; MAX_OUTBOUND_PAYLOAD_SIZE as usize]); - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, &message_payload.clone(),), - Error::::MessageRejectedByPallet( - VerificationError::MessageTooLarge - ), - ); - - // let's check that we're able to send `MAX_OUTBOUND_PAYLOAD_SIZE` messages - while message_payload.encoded_size() as u32 > MAX_OUTBOUND_PAYLOAD_SIZE { - message_payload.extra.pop(); - } - assert_eq!(message_payload.encoded_size() as u32, MAX_OUTBOUND_PAYLOAD_SIZE); - - let valid_message = - Pallet::::validate_message(TEST_LANE_ID, &message_payload) - .expect("validate_message has failed"); - Pallet::::send_message(valid_message); - }) - } - - #[test] - fn chain_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with this payload are rejected by target chain verifier - assert_noop!( - Pallet::::validate_message( - TEST_LANE_ID, - &PAYLOAD_REJECTED_BY_TARGET_CHAIN, - ), - Error::::MessageRejectedByChainVerifier(VerificationError::Other( - mock::TEST_ERROR - )), - ); - }); - } - - #[test] - fn receive_messages_proof_works() { - run_test(|| { - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).0.last_delivered_nonce(), 1); - - assert!(TestDeliveryPayments::is_reward_paid(1)); - }); - } - - #[test] - fn receive_messages_proof_updates_confirmed_message_nonce() { - run_test(|| { - // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 8, - relayers: vec![ - unrewarded_relayer(9, 9, TEST_RELAYER_A), - unrewarded_relayer(10, 10, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 10, - }, - ); - - // message proof includes outbound lane state with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = - Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = - Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }); - - 
assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - message_proof, - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!( - InboundLanes::::get(TEST_LANE_ID).0, - InboundLaneData { - last_confirmed_nonce: 9, - relayers: vec![ - unrewarded_relayer(10, 10, TEST_RELAYER_B), - unrewarded_relayer(11, 11, TEST_RELAYER_A) - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 11, - }, - ); - }); - } - - #[test] - fn receive_messages_fails_if_dispatcher_is_inactive() { - run_test(|| { - TestMessageDispatch::deactivate(); - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::MessageDispatchInactive, - ); - }); - } - - #[test] - fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { - run_test(|| { - let mut declared_weight = REGULAR_PAYLOAD.declared_weight; - *declared_weight.ref_time_mut() -= 1; - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - declared_weight, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn receive_messages_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Err(()).into(), - 1, - Weight::zero(), - ), - Error::::InvalidMessagesProof, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_proof_with_too_many_messages() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - u32::MAX, - Weight::zero(), - ), - Error::::TooManyMessagesInTheProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - - assert_eq!( - OutboundLanes::::get(TEST_LANE_ID).latest_received_nonce, - 1, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rewards_relayers() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - let single_message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(), - ..Default::default() - }, - ))); - let single_message_delivery_proof_size = single_message_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - single_message_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ); - assert_ok!(result); - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(single_message_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - 
assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 1))); - - // this reports delivery of both message 1 and message 2 => reward is paid only to - // TEST_RELAYER_B - let two_messages_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - ..Default::default() - }, - ))); - let two_messages_delivery_proof_size = two_messages_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - two_messages_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 2, - }, - ); - assert_ok!(result); - // even though the pre-dispatch weight was for two messages, the actual weight is - // for single message only - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(two_messages_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0))); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Err(())), - Default::default(), - ), - Error::::InvalidMessagesDeliveryProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { - run_test(|| { - // when number of relayers entries is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when number of messages is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 1, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when last delivered nonce is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - 
..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 2, - last_delivered_nonce: 8, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - }); - } - - #[test] - fn receive_messages_accepts_single_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(1, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![invalid_message]).into(), - 1, - Weight::zero(), /* weight may be zero in this case (all messages are - * improperly encoded) */ - ),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1,); - }); - } - - #[test] - fn receive_messages_accepts_batch_with_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(2, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok( - vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),] - ) - .into(), - 3, - REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, - ),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 3,); - }); - } - - #[test] - fn actual_dispatch_weight_does_not_overflow() { - run_test(|| { - let message1 = message(1, message_payload(0, u64::MAX / 2)); - let message2 = message(2, message_payload(0, u64::MAX / 2)); - let message3 = message(3, message_payload(0, u64::MAX / 2)); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - Weight::MAX, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn ref_time_refund_from_receive_messages_proof_works() { - run_test(|| { - fn submit_with_unspent_weight( - nonce: MessageNonce, - unspent_weight: u64, - ) -> (Weight, Weight) { - let mut payload = REGULAR_PAYLOAD; - *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight; - let proof = Ok(vec![message(nonce, payload)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - let result = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .expect("delivery has failed"); - let post_dispatch_weight = - result.actual_weight.expect("receive_messages_proof always returns Some"); - - // message delivery transactions are never free - assert_eq!(result.pays_fee, Pays::Yes); - - (pre_dispatch_weight, post_dispatch_weight) - } - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(1, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - - // when dispatch is returning `unspent_weight = declared_weight` - let (pre, post) = - submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time()); - assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when dispatch is returning `unspent_weight > declared_weight` - let (pre, post) = - submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1); - 
assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when there's no unspent weight - let (pre, post) = submit_with_unspent_weight(4, 0); - assert_eq!(post.ref_time(), pre.ref_time()); - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(5, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - }); - } - - #[test] - fn proof_size_refund_from_receive_messages_proof_works() { - run_test(|| { - let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // if there's maximal number of unrewarded relayer entries at the inbound lane, then - // `proof_size` is unchanged in post-dispatch weight - let proof: TestMessagesProof = Ok(vec![message(101, REGULAR_PAYLOAD)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof.clone(), - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size()); - - // if count of unrewarded relayer entries is less than maximal, then some `proof_size` - // must be refunded - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - 1 - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert!( - post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(), - "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}", - post_dispatch_weight.proof_size(), - pre_dispatch_weight.proof_size(), - ); - }); - } - - #[test] - fn messages_delivered_callbacks_are_called() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // messages 1+2 are confirmed in 1 tx, message 3 in a separate tx - // dispatch of message 2 has failed - let mut delivered_messages_1_and_2 = DeliveredMessages::new(1); - delivered_messages_1_and_2.note_dispatched_message(); - let messages_1_and_2_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_messages_1_and_2.clone(), - }] - .into_iter() - .collect(), - }, - )); - let delivered_message_3 = DeliveredMessages::new(3); - let messages_3_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { relayer: 0, messages: delivered_message_3 }] - .into_iter() - .collect(), - }, - )); - - // first tx with messages 1+2 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_1_and_2_proof), - 
UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - last_delivered_nonce: 2, - }, - )); - // second tx with message 3 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_3_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 3, - }, - )); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected( - ) { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; - // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` - // returns `last_confirmed_nonce`; - // 3) it means that we're going to confirm delivery of messages 1..=1; - // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // number of actually confirmed messages is `1`. - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, - ))), - UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, - ), - Error::::ReceptionConfirmation( - ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected - ), - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_messages::storage_keys::operating_mode_key("Messages").0, - ); - - assert_eq!( - OutboundMessages::::storage_map_final_key(MessageKey { - lane_id: TEST_LANE_ID, - nonce: 42 - }), - bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, - ); - - assert_eq!( - OutboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - - assert_eq!( - InboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - } - - #[test] - fn inbound_message_details_works() { - run_test(|| { - assert_eq!( - Pallet::::inbound_message_data( - TEST_LANE_ID, - REGULAR_PAYLOAD.encode(), - OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 }, - ), - InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight }, - ); - }); - } - - #[test] - fn on_idle_callback_respects_remaining_weight() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 4, - relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 4, - total_messages: 4, - last_delivered_nonce: 4, - }, - )); - - // all 4 messages may be pruned now - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 4 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - System::::set_block_number(2); - - // if passed wight is too low to do 
anything - let dbw = DbWeight::get(); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 1)), - Weight::zero(), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - - // if passed wight is enough to prune single message - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 2)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - - // if passed wight is enough to prune two more messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 3)), - dbw.reads_writes(1, 3), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 4 - ); - - // if passed wight is enough to prune many messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 5 - ); - }); - } - - #[test] - fn on_idle_callback_is_rotating_lanes_to_prune() { - run_test(|| { - // send + receive confirmation for lane 1 - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - // send + receive confirmation for lane 2 - send_regular_message(TEST_LANE_ID_2); - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID_2, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - // nothing is pruned yet - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#2.on_idle lane messages of lane 1 are pruned - let dbw = DbWeight::get(); - System::::set_block_number(2); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#3.on_idle lane messages of lane 2 are pruned - System::::set_block_number(3); - - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 2 - ); - }); - } - - #[test] - fn outbound_message_from_unconfigured_lane_is_rejected() { - run_test(|| { - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID_3, ®ULAR_PAYLOAD,), - Error::::InactiveOutboundLane, - ); - }); - } - - #[test] - fn test_bridge_messages_call_is_correctly_defined() { - let account_id = 1; - let message_proof: TestMessagesProof = Ok(vec![message(1, REGULAR_PAYLOAD)]).into(); - let message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))); - let unrewarded_relayer_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - 
last_delivered_nonce: 1, - ..Default::default() - }; - - let direct_receive_messages_proof_call = Call::::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof.clone(), - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - let indirect_receive_messages_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof, - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - assert_eq!( - direct_receive_messages_proof_call.encode(), - indirect_receive_messages_proof_call.encode() - ); - - let direct_receive_messages_delivery_proof_call = - Call::::receive_messages_delivery_proof { - proof: message_delivery_proof.clone(), - relayers_state: unrewarded_relayer_state.clone(), - }; - let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_delivery_proof { - proof: message_delivery_proof, - relayers_state: unrewarded_relayer_state, - }; - assert_eq!( - direct_receive_messages_delivery_proof_call.encode(), - indirect_receive_messages_delivery_proof_call.encode() - ); - } - - generate_owned_bridge_module_tests!( - MessagesOperatingMode::Basic(BasicOperatingMode::Normal), - MessagesOperatingMode::Basic(BasicOperatingMode::Halted) - ); - - #[test] - fn inbound_storage_extra_proof_size_bytes_works() { - fn relayer_entry() -> UnrewardedRelayer { - UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } } - } - - fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { - lane_id: Default::default(), - cached_data: Some(InboundLaneData { - relayers: vec![relayer_entry(); relayer_entries].into_iter().collect(), - last_confirmed_nonce: 0, - }), - _phantom: Default::default(), - } - } - - let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // when we have exactly `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0); - - // when we have less than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!( - storage(max_entries - 1).extra_proof_size_bytes(), - relayer_entry().encode().len() as u64 - ); - assert_eq!( - storage(max_entries - 2).extra_proof_size_bytes(), - 2 * relayer_entry().encode().len() as u64 - ); - - // when we have more than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - // (shall not happen in practice) - assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0); - } - - #[test] - fn maybe_outbound_lanes_count_returns_correct_value() { - assert_eq!( - MaybeOutboundLanesCount::::get(), - Some(mock::ActiveOutboundLanes::get().len() as u32) - ); - } -} diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index acef5546d2a64fa8a3fb38c6b41ae30819cdeaa2..fcdddf199dc65b37dd014745b6a1630709ad8f8b 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -18,16 +18,18 @@ use crate::{Config, LOG_TARGET}; -use bp_messages::{DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer}; +use bp_messages::{ + ChainWithMessages, DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer, +}; use codec::{Decode, Encode}; use 
frame_support::{ + traits::Get, weights::{RuntimeDbWeight, Weight}, BoundedVec, PalletError, }; -use num_traits::Zero; use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::collections::vec_deque::VecDeque; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData}; /// Outbound lane storage. pub trait OutboundLaneStorage { @@ -48,8 +50,17 @@ pub trait OutboundLaneStorage { fn remove_message(&mut self, nonce: &MessageNonce); } +/// Limit for the `StoredMessagePayload` vector. +pub struct StoredMessagePayloadLimit(PhantomData<(T, I)>); + +impl, I: 'static> Get for StoredMessagePayloadLimit { + fn get() -> u32 { + T::BridgedChain::maximal_incoming_message_size() + } +} + /// Outbound message data wrapper that implements `MaxEncodedLen`. -pub type StoredMessagePayload = BoundedVec>::MaximalOutboundPayloadSize>; +pub type StoredMessagePayload = BoundedVec>; /// Result of messages receival confirmation. #[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] @@ -204,11 +215,11 @@ fn ensure_unrewarded_relayers_are_correct( mod tests { use super::*; use crate::{ - mock::{ + outbound_lane, + tests::mock::{ outbound_message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, }, - outbound_lane, }; use frame_support::weights::constants::RocksDbWeight; use sp_std::ops::RangeInclusive; @@ -263,12 +274,43 @@ mod tests { assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(Some(delivered_messages(1..=3))), ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + }); + } + + #[test] + fn confirm_partial_delivery_works() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 2); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + + assert_eq!( + lane.confirm_delivery(3, 2, &unrewarded_relayers(1..=2)), + Ok(Some(delivered_messages(1..=2))), + ); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 2); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + + assert_eq!( + lane.confirm_delivery(3, 3, &unrewarded_relayers(3..=3)), + Ok(Some(delivered_messages(3..=3))), + ); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); }); } @@ -281,6 +323,7 @@ mod tests { lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(Some(delivered_messages(1..=3))), @@ -288,10 +331,12 @@ mod tests { 
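
The new `StoredMessagePayloadLimit` is the usual zero-sized `Get<u32>` adapter: it forwards a bridged-chain constant into the length bound of the `StoredMessagePayload` `BoundedVec`. A self-contained sketch of the pattern, with a hypothetical `BridgedChainConfig` trait and a hand-rolled `Get` standing in for the real `ChainWithMessages` and `frame_support::traits::Get`:

```rust
use std::marker::PhantomData;

/// Hand-rolled stand-in for `frame_support::traits::Get`.
trait Get<T> {
    fn get() -> T;
}

/// Hypothetical stand-in for the bridged chain configuration exposing the
/// maximal size of a message it accepts from this chain.
trait BridgedChainConfig {
    const MAX_INCOMING_MESSAGE_SIZE: u32;
}

/// Zero-sized adapter forwarding the chain constant into a `Get<u32>` bound,
/// mirroring how `StoredMessagePayloadLimit` forwards
/// `T::BridgedChain::maximal_incoming_message_size()`.
struct StoredPayloadLimit<C>(PhantomData<C>);

impl<C: BridgedChainConfig> Get<u32> for StoredPayloadLimit<C> {
    fn get() -> u32 {
        C::MAX_INCOMING_MESSAGE_SIZE
    }
}

struct TestBridgedChain;
impl BridgedChainConfig for TestBridgedChain {
    const MAX_INCOMING_MESSAGE_SIZE: u32 = 4096;
}

fn main() {
    // usable wherever a `Get<u32>` length bound is expected, e.g. a BoundedVec limit
    assert_eq!(StoredPayloadLimit::<TestBridgedChain>::get(), 4096);
}
```

Deriving the bound from the bridged chain this way is what lets the patch drop the separate `MaximalOutboundPayloadSize` associated type from the mock `Config` further down.
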
assert_eq!(lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(None),); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!(lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), Ok(None),); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); }); } @@ -310,8 +355,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(2..=30).into_iter()) - .chain(unrewarded_relayers(3..=3).into_iter()) + .chain(unrewarded_relayers(2..=30)) + .chain(unrewarded_relayers(3..=3)) .collect(), ), Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), @@ -326,8 +371,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(2..=1).into_iter()) - .chain(unrewarded_relayers(2..=3).into_iter()) + .chain(unrewarded_relayers(2..=1)) + .chain(unrewarded_relayers(2..=3)) .collect(), ), Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry), @@ -341,8 +386,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(3..=3).into_iter()) - .chain(unrewarded_relayers(2..=2).into_iter()) + .chain(unrewarded_relayers(3..=3)) + .chain(unrewarded_relayers(2..=2)) .collect(), ), Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries), diff --git a/bridges/modules/messages/src/proofs.rs b/bridges/modules/messages/src/proofs.rs new file mode 100644 index 0000000000000000000000000000000000000000..18367029d72cd356939f19f769a36b634ad1991e --- /dev/null +++ b/bridges/modules/messages/src/proofs.rs @@ -0,0 +1,562 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for messages and delivery proof verification. + +use crate::{BridgedChainOf, BridgedHeaderChainOf, Config}; + +use bp_header_chain::{HeaderChain, HeaderChainError}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::{FromBridgedChainMessagesProof, ProvedLaneMessages, ProvedMessages}, + ChainWithMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, + OutboundLaneData, VerificationError, +}; +use bp_runtime::{ + HashOf, HasherOf, RangeInclusiveExt, RawStorageProof, StorageProofChecker, StorageProofError, +}; +use codec::Decode; +use sp_std::vec::Vec; + +/// 'Parsed' message delivery proof - inbound lane id and its state. +pub(crate) type ParsedMessagesDeliveryProofFromBridgedChain = + (LaneId, InboundLaneData<::AccountId>); + +/// Verify proof of Bridged -> This chain messages. +/// +/// This function is used when Bridged chain is directly using GRANDPA finality. 
For Bridged +/// parachains, please use the `verify_messages_proof_from_parachain`. +/// +/// The `messages_count` argument verification (sane limits) is supposed to be made +/// outside of this function. This function only verifies that the proof declares exactly +/// `messages_count` messages. +pub fn verify_messages_proof, I: 'static>( + proof: FromBridgedChainMessagesProof>>, + messages_count: u32, +) -> Result, VerificationError> { + let FromBridgedChainMessagesProof { + bridged_header_hash, + storage_proof, + lane, + nonces_start, + nonces_end, + } = proof; + let mut parser: MessagesStorageProofAdapter = + MessagesStorageProofAdapter::try_new_with_verified_storage_proof( + bridged_header_hash, + storage_proof, + ) + .map_err(VerificationError::HeaderChain)?; + let nonces_range = nonces_start..=nonces_end; + + // receiving proofs where end < begin is ok (if proof includes outbound lane state) + let messages_in_the_proof = nonces_range.checked_len().unwrap_or(0); + if messages_in_the_proof != MessageNonce::from(messages_count) { + return Err(VerificationError::MessagesCountMismatch) + } + + // Read messages first. All messages that are claimed to be in the proof must + // be in the proof. So any error in `read_value`, or even missing value is fatal. + // + // Mind that we allow proofs with no messages if outbound lane state is proved. + let mut messages = Vec::with_capacity(messages_in_the_proof as _); + for nonce in nonces_range { + let message_key = MessageKey { lane_id: lane, nonce }; + let message_payload = parser + .read_and_decode_message_payload(&message_key) + .map_err(VerificationError::MessageStorage)?; + messages.push(Message { key: message_key, payload: message_payload }); + } + + // Now let's check if proof contains outbound lane state proof. It is optional, so + // we simply ignore `read_value` errors and missing value. + let proved_lane_messages = ProvedLaneMessages { + lane_state: parser + .read_and_decode_outbound_lane_data(&lane) + .map_err(VerificationError::OutboundLaneStorage)?, + messages, + }; + + // Now we may actually check if the proof is empty or not. + if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { + return Err(VerificationError::EmptyMessageProof) + } + + // Check that the storage proof doesn't have any untouched keys. + parser.ensure_no_unused_keys().map_err(VerificationError::StorageProof)?; + + // We only support single lane messages in this generated_schema + let mut proved_messages = ProvedMessages::new(); + proved_messages.insert(lane, proved_lane_messages); + + Ok(proved_messages) +} + +/// Verify proof of This -> Bridged chain messages delivery. +pub fn verify_messages_delivery_proof, I: 'static>( + proof: FromBridgedChainMessagesDeliveryProof>>, +) -> Result, VerificationError> { + let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = proof; + let mut parser: MessagesStorageProofAdapter = + MessagesStorageProofAdapter::try_new_with_verified_storage_proof( + bridged_header_hash, + storage_proof, + ) + .map_err(VerificationError::HeaderChain)?; + // Messages delivery proof is just proof of single storage key read => any error + // is fatal. 
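
One detail worth calling out in `verify_messages_proof` above: the declared nonce range is measured with a checked length, so a hostile `nonces_end` (the `verify_messages_proof_does_not_panic_if_messages_count_mismatches` test later in this file uses `u64::MAX`) yields `MessagesCountMismatch` instead of an arithmetic panic, and an empty range is tolerated when only the outbound lane state is being proved. A standalone sketch of that check, with a local `checked_len` helper standing in for `bp_runtime::RangeInclusiveExt`:

```rust
use std::ops::RangeInclusive;

/// Local stand-in for `RangeInclusiveExt::checked_len`: number of nonces in
/// the range, or `None` if the range is empty or the length overflows.
fn checked_len(range: &RangeInclusive<u64>) -> Option<u64> {
    range.end().checked_sub(*range.start()).and_then(|len| len.checked_add(1))
}

/// Mirrors the first verification step: the proof must declare exactly
/// `messages_count` messages, otherwise it is rejected up front.
fn check_messages_count(
    nonces_start: u64,
    nonces_end: u64,
    messages_count: u32,
) -> Result<(), &'static str> {
    // an empty range (end < start) is fine if only lane state is proved
    let messages_in_the_proof = checked_len(&(nonces_start..=nonces_end)).unwrap_or(0);
    if messages_in_the_proof != u64::from(messages_count) {
        return Err("MessagesCountMismatch");
    }
    Ok(())
}

fn main() {
    assert_eq!(check_messages_count(1, 10, 10), Ok(()));
    // declared count disagrees with the nonce range
    assert_eq!(check_messages_count(1, 10, 5), Err("MessagesCountMismatch"));
    // a hostile `nonces_end` cannot trigger an arithmetic panic
    assert_eq!(check_messages_count(1, u64::MAX, u32::MAX), Err("MessagesCountMismatch"));
}
```
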
+ let storage_inbound_lane_data_key = bp_messages::storage_keys::inbound_lane_data_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &lane, + ); + let inbound_lane_data = parser + .read_and_decode_mandatory_value(&storage_inbound_lane_data_key) + .map_err(VerificationError::InboundLaneStorage)?; + + // check that the storage proof doesn't have any untouched trie nodes + parser.ensure_no_unused_keys().map_err(VerificationError::StorageProof)?; + + Ok((lane, inbound_lane_data)) +} + +/// Abstraction over storage proof manipulation, hiding implementation details of actual storage +/// proofs. +trait StorageProofAdapter, I: 'static> { + fn read_and_decode_mandatory_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result; + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError>; + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError>; + + fn read_and_decode_outbound_lane_data( + &mut self, + lane_id: &LaneId, + ) -> Result, StorageProofError> { + let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + lane_id, + ); + self.read_and_decode_optional_value(&storage_outbound_lane_data_key) + } + + fn read_and_decode_message_payload( + &mut self, + message_key: &MessageKey, + ) -> Result { + let storage_message_key = bp_messages::storage_keys::message_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &message_key.lane_id, + message_key.nonce, + ); + self.read_and_decode_mandatory_value(&storage_message_key) + } +} + +/// Actual storage proof adapter for messages proofs. +type MessagesStorageProofAdapter = StorageProofCheckerAdapter; + +/// A `StorageProofAdapter` implementation for raw storage proofs. 
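
The `StorageProofAdapter` trait above reduces proof handling to three operations: a mandatory read (a missing value is fatal), an optional read (used for the outbound lane state), and a final check that every node supplied with the proof was actually consumed. A toy in-memory model of the same contract, tracking consumed keys in a `HashSet` rather than walking a real trie proof (type and error names here are illustrative, not the pallet's):

```rust
use std::collections::{BTreeMap, HashSet};

/// Toy stand-in for a verified storage proof: a key/value map plus a record
/// of which keys have been consumed so far.
struct ToyProofAdapter {
    values: BTreeMap<Vec<u8>, Vec<u8>>,
    read: HashSet<Vec<u8>>,
}

impl ToyProofAdapter {
    fn new(values: BTreeMap<Vec<u8>, Vec<u8>>) -> Self {
        Self { values, read: HashSet::new() }
    }

    /// Mandatory read: the key must be present, mirroring how a missing
    /// message payload makes the whole proof invalid.
    fn read_mandatory(&mut self, key: &[u8]) -> Result<Vec<u8>, &'static str> {
        self.read.insert(key.to_vec());
        self.values.get(key).cloned().ok_or("EmptyVal")
    }

    /// Optional read: absence is fine, mirroring the optional outbound lane state.
    fn read_optional(&mut self, key: &[u8]) -> Option<Vec<u8>> {
        self.read.insert(key.to_vec());
        self.values.get(key).cloned()
    }

    /// Every key supplied with the proof must have been used, otherwise the
    /// proof is bloated and gets rejected (cf. the unused-key error above).
    fn ensure_no_unused_keys(self) -> Result<(), &'static str> {
        if self.values.keys().all(|k| self.read.contains(k)) {
            Ok(())
        } else {
            Err("UnusedKey")
        }
    }
}

fn main() {
    let mut values = BTreeMap::new();
    values.insert(b"message/1".to_vec(), vec![42]);
    values.insert(b"unused".to_vec(), vec![0]);

    let mut adapter = ToyProofAdapter::new(values);
    assert_eq!(adapter.read_mandatory(b"message/1"), Ok(vec![42]));
    assert_eq!(adapter.read_optional(b"outbound_lane_state"), None);
    // the `unused` key was never touched => the proof is rejected
    assert_eq!(adapter.ensure_no_unused_keys(), Err("UnusedKey"));
}
```
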
+struct StorageProofCheckerAdapter, I: 'static> { + storage: StorageProofChecker>>, + _dummy: sp_std::marker::PhantomData<(T, I)>, +} + +impl, I: 'static> StorageProofCheckerAdapter { + fn try_new_with_verified_storage_proof( + bridged_header_hash: HashOf>, + storage_proof: RawStorageProof, + ) -> Result { + BridgedHeaderChainOf::::verify_storage_proof(bridged_header_hash, storage_proof).map( + |storage| StorageProofCheckerAdapter:: { storage, _dummy: Default::default() }, + ) + } +} + +impl, I: 'static> StorageProofAdapter for StorageProofCheckerAdapter { + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError> { + self.storage.read_and_decode_opt_value(key.as_ref()) + } + + fn read_and_decode_mandatory_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result { + self.storage.read_and_decode_mandatory_value(key.as_ref()) + } + + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError> { + self.storage.ensure_no_unused_nodes() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{ + messages_generation::{ + encode_all_messages, encode_lane_data, generate_dummy_message, + prepare_messages_storage_proof, + }, + mock::*, + }; + + use bp_header_chain::StoredHeaderDataBuilder; + use bp_runtime::{HeaderId, StorageProofError}; + use codec::Encode; + use sp_runtime::traits::Header; + + fn using_messages_proof( + nonces_end: MessageNonce, + outbound_lane_data: Option, + encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, + encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, + add_duplicate_key: bool, + add_unused_key: bool, + test: impl Fn(FromBridgedChainMessagesProof) -> R, + ) -> R { + let (state_root, storage_proof) = prepare_messages_storage_proof::( + TEST_LANE_ID, + 1..=nonces_end, + outbound_lane_data, + bp_runtime::UnverifiedStorageProofParams::default(), + generate_dummy_message, + encode_message, + encode_outbound_lane_data, + add_duplicate_key, + add_unused_key, + ); + + sp_io::TestExternalities::new(Default::default()).execute_with(move || { + let bridged_header = BridgedChainHeader::new( + 0, + Default::default(), + state_root, + Default::default(), + Default::default(), + ); + let bridged_header_hash = bridged_header.hash(); + + pallet_bridge_grandpa::BestFinalized::::put(HeaderId( + 0, + bridged_header_hash, + )); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + bridged_header.build(), + ); + test(FromBridgedChainMessagesProof { + bridged_header_hash, + storage_proof, + lane: TEST_LANE_ID, + nonces_start: 1, + nonces_end, + }) + }) + } + + #[test] + fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 5) } + ), + Err(VerificationError::MessagesCountMismatch), + ); + } + + #[test] + fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 15) } + ), + Err(VerificationError::MessagesCountMismatch), + ); + } + + #[test] + fn message_proof_is_rejected_if_header_is_missing_from_the_chain() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { + let bridged_header_hash = + 
pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; + pallet_bridge_grandpa::ImportedHeaders::::remove( + bridged_header_hash, + ); + verify_messages_proof::(proof, 10) + } + ), + Err(VerificationError::HeaderChain(HeaderChainError::UnknownHeader)), + ); + } + + #[test] + fn message_proof_is_rejected_if_header_state_root_mismatches() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { + let bridged_header_hash = + pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + BridgedChainHeader::new( + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + .build(), + ); + verify_messages_proof::(proof, 10) + } + ), + Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( + StorageProofError::StorageRootMismatch + ))), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + true, + false, + |proof| { verify_messages_proof::(proof, 10) }, + ), + Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( + StorageProofError::DuplicateNodes + ))), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_has_unused_trie_nodes() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + true, + |proof| { verify_messages_proof::(proof, 10) }, + ), + Err(VerificationError::StorageProof(StorageProofError::UnusedKey)), + ); + } + + #[test] + fn message_proof_is_rejected_if_required_message_is_missing() { + matches!( + using_messages_proof( + 10, + None, + |n, m| if n != 5 { Some(m.encode()) } else { None }, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 10) + ), + Err(VerificationError::MessageStorage(StorageProofError::EmptyVal)), + ); + } + + #[test] + fn message_proof_is_rejected_if_message_decode_fails() { + matches!( + using_messages_proof( + 10, + None, + |n, m| { + let mut m = m.encode(); + if n == 5 { + m = vec![42] + } + Some(m) + }, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 10), + ), + Err(VerificationError::MessageStorage(StorageProofError::DecodeError)), + ); + } + + #[test] + fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { + matches!( + using_messages_proof( + 10, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + |d| { + let mut d = d.encode(); + d.truncate(1); + d + }, + false, + false, + |proof| verify_messages_proof::(proof, 10), + ), + Err(VerificationError::OutboundLaneStorage(StorageProofError::DecodeError)), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_is_empty() { + assert_eq!( + using_messages_proof( + 0, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 0) }, + ), + Err(VerificationError::EmptyMessageProof), + ); + } + + #[test] + fn non_empty_message_proof_without_messages_is_accepted() { + assert_eq!( + using_messages_proof( + 0, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 0), + ), + Ok(vec![( + TEST_LANE_ID, + ProvedLaneMessages { + lane_state: 
Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: Vec::new(), + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn non_empty_message_proof_is_accepted() { + assert_eq!( + using_messages_proof( + 1, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 1), + ), + Ok(vec![( + TEST_LANE_ID, + ProvedLaneMessages { + lane_state: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: vec![Message { + key: MessageKey { lane_id: TEST_LANE_ID, nonce: 1 }, + payload: vec![42], + }], + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn verify_messages_proof_does_not_panic_if_messages_count_mismatches() { + assert_eq!( + using_messages_proof( + 1, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |mut proof| { + proof.nonces_end = u64::MAX; + verify_messages_proof::(proof, u32::MAX) + }, + ), + Err(VerificationError::MessagesCountMismatch), + ); + } +} diff --git a/bridges/bin/runtime-common/src/messages_generation.rs b/bridges/modules/messages/src/tests/messages_generation.rs similarity index 62% rename from bridges/bin/runtime-common/src/messages_generation.rs rename to bridges/modules/messages/src/tests/messages_generation.rs index c37aaa5d4d5378a1b76507e017c73aec3c7aabbd..6c4867fa6de39fa97b35cbd1f270bdeff4c76dff 100644 --- a/bridges/bin/runtime-common/src/messages_generation.rs +++ b/bridges/modules/messages/src/tests/messages_generation.rs @@ -16,17 +16,23 @@ //! Helpers for generating message storage proofs, that are used by tests and by benchmarks. -use crate::messages::{AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain}; - use bp_messages::{ - storage_keys, InboundLaneData, LaneId, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, + storage_keys, ChainWithMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, + MessagePayload, OutboundLaneData, +}; +use bp_runtime::{ + grow_storage_value, record_all_trie_keys, AccountIdOf, Chain, HashOf, HasherOf, + RawStorageProof, UnverifiedStorageProofParams, }; -use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize}; use codec::Encode; use sp_std::{ops::RangeInclusive, prelude::*}; use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; +/// Dummy message generation function. +pub fn generate_dummy_message(_: MessageNonce) -> MessagePayload { + vec![42] +} + /// Simple and correct message data encode function. pub fn encode_all_messages(_: MessageNonce, m: &MessagePayload) -> Option> { Some(m.encode()) @@ -40,18 +46,20 @@ pub fn encode_lane_data(d: &OutboundLaneData) -> Vec { /// Prepare storage proof of given messages. /// /// Returns state trie root and nodes with prepared messages. 
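
In `prepare_messages_storage_proof` (next hunk), the proof-size parameters only touch the first message: its payload is padded up to the requested size before being inserted into the trie. The removed `grow_trie_leaf_value` at the end of this file's diff shows the original padding logic, and the relocated `bp_runtime::grow_storage_value` is assumed here to behave the same way under `UnverifiedStorageProofParams`. A standalone sketch with a plain `Option<u32>` standing in for those params:

```rust
/// Pad `value` with filler bytes up to `target_size`, leaving it untouched if
/// it is already large enough. Mirrors the removed `grow_trie_leaf_value`;
/// the real helper is driven by `UnverifiedStorageProofParams` instead of an Option.
fn grow_storage_value(mut value: Vec<u8>, target_size: Option<u32>) -> Vec<u8> {
    if let Some(target_size) = target_size {
        let target_size = target_size as usize;
        if target_size > value.len() {
            value.extend(std::iter::repeat(42u8).take(target_size - value.len()));
        }
    }
    value
}

fn main() {
    // no size requested => payload is unchanged
    assert_eq!(grow_storage_value(vec![1, 2, 3], None), vec![1, 2, 3]);
    // payload is padded with filler bytes up to the requested proof size
    let grown = grow_storage_value(vec![1, 2, 3], Some(8));
    assert_eq!(grown.len(), 8);
    assert!(grown[3..].iter().all(|byte| *byte == 42));
}
```
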
-pub fn prepare_messages_storage_proof( +#[allow(clippy::too_many_arguments)] +pub fn prepare_messages_storage_proof( lane: LaneId, message_nonces: RangeInclusive, outbound_lane_data: Option, - size: StorageProofSize, - message_payload: MessagePayload, + proof_params: UnverifiedStorageProofParams, + generate_message: impl Fn(MessageNonce) -> MessagePayload, encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, -) -> (HashOf>, RawStorageProof) + add_duplicate_key: bool, + add_unused_key: bool, +) -> (HashOf, RawStorageProof) where - B: MessageBridge, - HashOf>: Copy + Default, + HashOf: Copy + Default, { // prepare Bridged chain storage with messages and (optionally) outbound lane state let message_count = message_nonces.end().saturating_sub(*message_nonces.start()) + 1; @@ -60,22 +68,22 @@ where let mut mdb = MemoryDB::default(); { let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); + TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); // insert messages for (i, nonce) in message_nonces.into_iter().enumerate() { let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = match encode_message(nonce, &message_payload) { + let message_payload = match encode_message(nonce, &generate_message(nonce)) { Some(message_payload) => if i == 0 { - grow_trie_leaf_value(message_payload, size) + grow_storage_value(message_payload, &proof_params) } else { message_payload }, None => continue, }; let storage_key = storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, + ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &message_key.lane_id, message_key.nonce, ) @@ -89,8 +97,11 @@ where // insert outbound lane state if let Some(outbound_lane_data) = outbound_lane_data.as_ref().map(encode_outbound_lane_data) { - let storage_key = - storage_keys::outbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; + let storage_key = storage_keys::outbound_lane_data_key( + ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &lane, + ) + .0; trie.insert(&storage_key, &outbound_lane_data) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in benchmarks"); @@ -99,52 +110,54 @@ where } // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); + let mut storage_proof = + record_all_trie_keys::>, _>(&mdb, &root) + .map_err(|_| "record_all_trie_keys has failed") + .expect("record_all_trie_keys should not fail in benchmarks"); + + if add_duplicate_key { + assert!(!storage_proof.is_empty()); + let node = storage_proof.pop().unwrap(); + storage_proof.push(node.clone()); + storage_proof.push(node); + } + + if add_unused_key { + storage_proof.push(b"unused_value".to_vec()); + } + (root, storage_proof) } /// Prepare storage proof of given messages delivery. /// /// Returns state trie root and nodes with prepared messages. 
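
The two new flags exist purely to manufacture broken proofs for the negative tests: `add_duplicate_key` re-pushes a node that is already present, `add_unused_key` appends a node no verified read will ever touch. Outside the trie machinery this is plain vector surgery on the `RawStorageProof`; a minimal sketch (the error names in the comments refer to the variants the proof tests expect):

```rust
/// A raw storage proof is just a list of encoded trie nodes.
type RawProof = Vec<Vec<u8>>;

/// Duplicate the last node: verification is expected to fail with a
/// duplicate-nodes error (cf. `StorageProofError::DuplicateNodes`).
fn add_duplicate_node(mut proof: RawProof) -> RawProof {
    let node = proof.pop().expect("proof must not be empty");
    proof.push(node.clone());
    proof.push(node);
    proof
}

/// Append a node that no verified read will ever touch: verification is
/// expected to fail with an unused-key error (cf. `StorageProofError::UnusedKey`).
fn add_unused_node(mut proof: RawProof) -> RawProof {
    proof.push(b"unused_value".to_vec());
    proof
}

fn main() {
    let proof: RawProof = vec![b"node-1".to_vec(), b"node-2".to_vec()];
    assert_eq!(add_duplicate_node(proof.clone()).len(), 3);
    assert_eq!(add_unused_node(proof).len(), 3);
}
```
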
-pub fn prepare_message_delivery_storage_proof( +pub fn prepare_message_delivery_storage_proof( lane: LaneId, - inbound_lane_data: InboundLaneData>>, - size: StorageProofSize, -) -> (HashOf>, RawStorageProof) + inbound_lane_data: InboundLaneData>, + proof_params: UnverifiedStorageProofParams, +) -> (HashOf, RawStorageProof) where - B: MessageBridge, + HashOf: Copy + Default, { // prepare Bridged chain storage with inbound lane state - let storage_key = storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; + let storage_key = + storage_keys::inbound_lane_data_key(ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &lane).0; let mut root = Default::default(); let mut mdb = MemoryDB::default(); { let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); - let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size); + TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + let inbound_lane_data = grow_storage_value(inbound_lane_data.encode(), &proof_params); trie.insert(&storage_key, &inbound_lane_data) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in benchmarks"); } // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) + let storage_proof = record_all_trie_keys::>, _>(&mdb, &root) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); (root, storage_proof) } - -/// Add extra data to the trie leaf value so that it'll be of given size. -pub fn grow_trie_leaf_value(mut value: Vec, size: StorageProofSize) -> Vec { - match size { - StorageProofSize::Minimal(_) => (), - StorageProofSize::HasLargeLeaf(size) if size as usize > value.len() => { - value.extend(sp_std::iter::repeat(42u8).take(size as usize - value.len())); - }, - StorageProofSize::HasLargeLeaf(_) => (), - } - value -} diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/tests/mock.rs similarity index 62% rename from bridges/modules/messages/src/mock.rs rename to bridges/modules/messages/src/tests/mock.rs index ec63f15b94b5205d744b1379bd6697a4ae43534a..ffdd536830b5f7dde6671c75ba1ac108d7d218e4 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/tests/mock.rs @@ -17,30 +17,43 @@ // From construct_runtime macro #![allow(clippy::from_over_into)] -use crate::{Config, StoredMessagePayload}; +use crate::{ + tests::messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + Config, StoredMessagePayload, +}; +use bp_header_chain::{ChainWithGrandpa, StoredHeaderData}; use bp_messages::{ calc_relayers_rewards, - source_chain::{DeliveryConfirmationPayments, OnMessagesDelivered, TargetHeaderChain}, + source_chain::{ + DeliveryConfirmationPayments, FromBridgedChainMessagesDeliveryProof, OnMessagesDelivered, + }, target_chain::{ - DeliveryPayments, DispatchMessage, DispatchMessageData, MessageDispatch, - ProvedLaneMessages, ProvedMessages, SourceHeaderChain, + DeliveryPayments, DispatchMessage, DispatchMessageData, FromBridgedChainMessagesProof, + MessageDispatch, }, - DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, - UnrewardedRelayer, UnrewardedRelayersState, VerificationError, + ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, + MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, +}; +use bp_runtime::{ + 
messages::MessageDispatchResult, Chain, ChainId, Size, UnverifiedStorageProofParams, }; -use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_runtime::BuildStorage; -use std::{ - collections::{BTreeMap, VecDeque}, - ops::RangeInclusive, +use sp_core::H256; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, ConstU32}, + BuildStorage, StateVersion, }; +use std::{collections::VecDeque, ops::RangeInclusive}; pub type AccountId = u64; pub type Balance = u64; @@ -62,6 +75,77 @@ pub type TestMessageFee = u64; pub type TestRelayer = u64; pub type TestDispatchLevelResult = (); +pub struct ThisChain; + +impl Chain for ThisChain { + const ID: ChainId = *b"ttch"; + + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = SubstrateHeader; + type AccountId = AccountId; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + + fn max_extrinsic_size() -> u32 { + u32::MAX + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +impl ChainWithMessages for ThisChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = "WithThisChainBridgeMessages"; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 128; +} + +pub struct BridgedChain; + +pub type BridgedHeaderHash = H256; +pub type BridgedChainHeader = SubstrateHeader; + +impl Chain for BridgedChain { + const ID: ChainId = *b"tbch"; + + type BlockNumber = u64; + type Hash = BridgedHeaderHash; + type Hasher = BlakeTwo256; + type Header = BridgedChainHeader; + type AccountId = TestRelayer; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + + fn max_extrinsic_size() -> u32 { + 4096 + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +impl ChainWithGrandpa for BridgedChain { + const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "WithBridgedChainBridgeGrandpa"; + const MAX_AUTHORITIES_COUNT: u32 = 16; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 4; + const MAX_MANDATORY_HEADER_SIZE: u32 = 4096; + const AVERAGE_HEADER_SIZE: u32 = 4096; +} + +impl ChainWithMessages for BridgedChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = "WithBridgedChainBridgeMessages"; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 128; +} + type Block = frame_system::mocking::MockBlock; use crate as pallet_bridge_messages; @@ -71,6 +155,7 @@ frame_support::construct_runtime! 
{ { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Event}, + BridgedChainGrandpa: pallet_bridge_grandpa::{Pallet, Call, Event}, Messages: pallet_bridge_messages::{Pallet, Call, Event}, } } @@ -86,14 +171,20 @@ impl frame_system::Config for TestRuntime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } +impl pallet_bridge_grandpa::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type BridgedChain = BridgedChain; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<1_024>; + type HeadersToKeep = ConstU32<8>; + type WeightInfo = pallet_bridge_grandpa::weights::BridgeWeight; +} + parameter_types! { pub const MaxMessagesToPruneAtOnce: u64 = 10; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 128; pub const TestBridgedChainId: bp_runtime::ChainId = *b"test"; pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID, TEST_LANE_ID_2]; } @@ -104,24 +195,22 @@ pub type TestWeightInfo = (); impl Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = TestWeightInfo; + + type ThisChain = ThisChain; + type BridgedChain = BridgedChain; + type BridgedHeaderChain = BridgedChainGrandpa; + type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = frame_support::traits::ConstU32; type OutboundPayload = TestPayload; type InboundPayload = TestPayload; - type InboundRelayer = TestRelayer; type DeliveryPayments = TestDeliveryPayments; - type TargetHeaderChain = TestTargetHeaderChain; type DeliveryConfirmationPayments = TestDeliveryConfirmationPayments; type OnMessagesDelivered = TestOnMessagesDelivered; - type SourceHeaderChain = TestSourceHeaderChain; type MessageDispatch = TestMessageDispatch; - type BridgedChainId = TestBridgedChainId; } #[cfg(feature = "runtime-benchmarks")] @@ -132,29 +221,26 @@ impl crate::benchmarking::Config<()> for TestRuntime { fn prepare_message_proof( params: crate::benchmarking::MessageProofParams, - ) -> (TestMessagesProof, Weight) { - // in mock run we only care about benchmarks correctness, not the benchmark results - // => ignore size related arguments - let (messages, total_dispatch_weight) = - params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).fold( - (Vec::new(), Weight::zero()), - |(mut messages, total_dispatch_weight), message| { - let weight = REGULAR_PAYLOAD.declared_weight; - messages.push(message); - (messages, total_dispatch_weight.saturating_add(weight)) - }, - ); - let mut proof: TestMessagesProof = Ok(messages).into(); - proof.result.as_mut().unwrap().get_mut(0).unwrap().1.lane_state = params.outbound_lane_data; - (proof, total_dispatch_weight) + ) -> (FromBridgedChainMessagesProof, Weight) { + use bp_runtime::RangeInclusiveExt; + + let dispatch_weight = + REGULAR_PAYLOAD.declared_weight * params.message_nonces.checked_len().unwrap_or(0); + ( + *prepare_messages_proof( + params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).collect(), + params.outbound_lane_data, + ), + dispatch_weight, + ) } fn prepare_message_delivery_proof( params: crate::benchmarking::MessageDeliveryProofParams, - ) -> 
TestMessagesDeliveryProof { + ) -> FromBridgedChainMessagesDeliveryProof { // in mock run we only care about benchmarks correctness, not the benchmark results // => ignore size related arguments - TestMessagesDeliveryProof(Ok((params.lane, params.inbound_lane_data))) + prepare_messages_delivery_proof(params.lane, params.inbound_lane_data) } fn is_relayer_rewarded(_relayer: &AccountId) -> bool { @@ -168,9 +254,6 @@ impl Size for TestPayload { } } -/// Maximal outbound payload size. -pub const MAX_OUTBOUND_PAYLOAD_SIZE: u32 = 4096; - /// Account that has balance to use in tests. pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; @@ -183,9 +266,6 @@ pub const TEST_RELAYER_B: AccountId = 101; /// Account id of additional test relayer - C. pub const TEST_RELAYER_C: AccountId = 102; -/// Error that is returned by all test implementations. -pub const TEST_ERROR: &str = "Test error"; - /// Lane that we're using in tests. pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]); @@ -198,71 +278,6 @@ pub const TEST_LANE_ID_3: LaneId = LaneId([0, 0, 0, 3]); /// Regular message payload. pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50); -/// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50); - -/// Vec of proved messages, grouped by lane. -pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages)>; - -/// Test messages proof. -#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct TestMessagesProof { - pub result: Result, -} - -impl Size for TestMessagesProof { - fn size(&self) -> u32 { - 0 - } -} - -impl From, ()>> for TestMessagesProof { - fn from(result: Result, ()>) -> Self { - Self { - result: result.map(|messages| { - let mut messages_by_lane: BTreeMap> = - BTreeMap::new(); - for message in messages { - messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message); - } - messages_by_lane.into_iter().collect() - }), - } - } -} - -/// Messages delivery proof used in tests. -#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq, TypeInfo)] -pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); - -impl Size for TestMessagesDeliveryProof { - fn size(&self) -> u32 { - 0 - } -} - -/// Target header chain that is used in tests. -#[derive(Debug, Default)] -pub struct TestTargetHeaderChain; - -impl TargetHeaderChain for TestTargetHeaderChain { - type MessagesDeliveryProof = TestMessagesDeliveryProof; - - fn verify_message(payload: &TestPayload) -> Result<(), VerificationError> { - if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { - Err(VerificationError::Other(TEST_ERROR)) - } else { - Ok(()) - } - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - proof.0.map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - /// Reward payments at the target chain during delivery transaction. #[derive(Debug, Default)] pub struct TestDeliveryPayments; @@ -323,24 +338,6 @@ impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayment } } -/// Source header chain that is used in tests. 
-#[derive(Debug)] -pub struct TestSourceHeaderChain; - -impl SourceHeaderChain for TestSourceHeaderChain { - type MessagesProof = TestMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - proof - .result - .map(|proof| proof.into_iter().collect()) - .map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - /// Test message dispatcher. #[derive(Debug)] pub struct TestMessageDispatch; @@ -459,3 +456,75 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub fn run_test(test: impl FnOnce() -> T) -> T { new_test_ext().execute_with(test) } + +/// Prepare valid storage proof for given messages and insert appropriate header to the +/// bridged header chain. +/// +/// Since this function changes the runtime storage, you can't "inline" it in the +/// `asset_noop` macro calls. +pub fn prepare_messages_proof( + messages: Vec, + outbound_lane_data: Option, +) -> Box> { + // first - let's generate storage proof + let lane = messages.first().unwrap().key.lane_id; + let nonces_start = messages.first().unwrap().key.nonce; + let nonces_end = messages.last().unwrap().key.nonce; + let (storage_root, storage_proof) = prepare_messages_storage_proof::( + TEST_LANE_ID, + nonces_start..=nonces_end, + outbound_lane_data, + UnverifiedStorageProofParams::default(), + |nonce| messages[(nonce - nonces_start) as usize].payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); + + // let's now insert bridged chain header into the storage + let bridged_header_hash = Default::default(); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + StoredHeaderData { number: 0, state_root: storage_root }, + ); + + Box::new(FromBridgedChainMessagesProof:: { + bridged_header_hash, + storage_proof, + lane, + nonces_start, + nonces_end, + }) +} + +/// Prepare valid storage proof for given messages and insert appropriate header to the +/// bridged header chain. +/// +/// Since this function changes the runtime storage, you can't "inline" it in the +/// `asset_noop` macro calls. +pub fn prepare_messages_delivery_proof( + lane: LaneId, + inbound_lane_data: InboundLaneData, +) -> FromBridgedChainMessagesDeliveryProof { + // first - let's generate storage proof + let (storage_root, storage_proof) = + prepare_message_delivery_storage_proof::( + lane, + inbound_lane_data, + UnverifiedStorageProofParams::default(), + ); + + // let's now insert bridged chain header into the storage + let bridged_header_hash = Default::default(); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + StoredHeaderData { number: 0, state_root: storage_root }, + ); + + FromBridgedChainMessagesDeliveryProof:: { + bridged_header_hash, + storage_proof, + lane, + } +} diff --git a/bridges/modules/messages/src/tests/mod.rs b/bridges/modules/messages/src/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c3bde5fc275849fab930ad0b1ec9f0b4e80ce4ea --- /dev/null +++ b/bridges/modules/messages/src/tests/mod.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests and test helpers for messages pallet. + +#![cfg(any(feature = "test-helpers", test))] + +#[cfg(test)] +pub(crate) mod mock; +#[cfg(test)] +mod pallet_tests; + +pub mod messages_generation; diff --git a/bridges/modules/messages/src/tests/pallet_tests.rs b/bridges/modules/messages/src/tests/pallet_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..42e1042717de07656630855c00967361be78f664 --- /dev/null +++ b/bridges/modules/messages/src/tests/pallet_tests.rs @@ -0,0 +1,1100 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Pallet-level tests. + +use crate::{ + outbound_lane, + outbound_lane::ReceptionConfirmationError, + tests::mock::{self, RuntimeEvent as TestEvent, *}, + weights_ext::WeightInfoExt, + Call, Config, Error, Event, InboundLanes, MaybeOutboundLanesCount, OutboundLanes, + OutboundMessages, Pallet, PalletOperatingMode, PalletOwner, RuntimeInboundLaneStorage, + StoredInboundLaneData, +}; + +use bp_messages::{ + source_chain::{FromBridgedChainMessagesDeliveryProof, MessagesBridge}, + target_chain::FromBridgedChainMessagesProof, + BridgeMessagesCall, ChainWithMessages, DeliveredMessages, InboundLaneData, + InboundMessageDetails, LaneId, MessageKey, MessageNonce, MessagesOperatingMode, + OutboundLaneData, OutboundMessageDetails, UnrewardedRelayer, UnrewardedRelayersState, + VerificationError, +}; +use bp_runtime::{BasicOperatingMode, PreComputedSize, RangeInclusiveExt, Size}; +use bp_test_utils::generate_owned_bridge_module_tests; +use codec::Encode; +use frame_support::{ + assert_noop, assert_ok, + dispatch::Pays, + storage::generator::{StorageMap, StorageValue}, + traits::Hooks, + weights::Weight, +}; +use frame_system::{EventRecord, Pallet as System, Phase}; +use sp_core::Get; +use sp_runtime::DispatchError; + +fn get_ready_for_events() { + System::::set_block_number(1); + System::::reset_events(); +} + +fn send_regular_message(lane_id: LaneId) { + get_ready_for_events(); + + let outbound_lane = outbound_lane::(lane_id); + let message_nonce = outbound_lane.data().latest_generated_nonce + 1; + let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); + let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) + .expect("validate_message has failed"); + let artifacts = Pallet::::send_message(valid_message); + assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); + + // check event with assigned nonce + assert_eq!( 
+ System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::Messages(Event::MessageAccepted { lane_id, nonce: message_nonce }), + topics: vec![], + }], + ); +} + +fn receive_messages_delivery_proof() { + System::::set_block_number(1); + System::::reset_events(); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: DeliveredMessages::new(1), + }] + .into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::Messages(Event::MessagesDelivered { + lane_id: TEST_LANE_ID, + messages: DeliveredMessages::new(1), + }), + topics: vec![], + }], + ); +} + +#[test] +fn pallet_rejects_transactions_if_halted() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + PalletOperatingMode::::put(MessagesOperatingMode::Basic( + BasicOperatingMode::Halted, + )); + + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), + Error::::NotOperatingNormally, + ); + + let messages_proof = prepare_messages_proof(vec![message(2, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + messages_proof, + 1, + REGULAR_PAYLOAD.declared_weight, + ), + Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), + ); + + let delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + ), + Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), + ); + }); +} + +#[test] +fn receive_messages_fails_if_dispatcher_is_inactive() { + run_test(|| { + TestMessageDispatch::deactivate(); + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, + REGULAR_PAYLOAD.declared_weight, + ), + Error::::MessageDispatchInactive, + ); + }); +} + +#[test] +fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + PalletOperatingMode::::put( + MessagesOperatingMode::RejectingOutboundMessages, + ); + + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), + Error::::NotOperatingNormally, + ); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None), + 1, + REGULAR_PAYLOAD.declared_weight, + ),); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, 
+ ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + }); +} + +#[test] +fn send_message_works() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + }); +} + +#[test] +fn send_message_rejects_too_large_message() { + run_test(|| { + let mut message_payload = message_payload(1, 0); + // the payload isn't simply extra, so it'll definitely overflow + // `max_outbound_payload_size` if we add `max_outbound_payload_size` bytes to extra + let max_outbound_payload_size = BridgedChain::maximal_incoming_message_size(); + message_payload + .extra + .extend_from_slice(&vec![0u8; max_outbound_payload_size as usize]); + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, &message_payload.clone(),), + Error::::MessageRejectedByPallet(VerificationError::MessageTooLarge), + ); + + // let's check that we're able to send `max_outbound_payload_size` messages + while message_payload.encoded_size() as u32 > max_outbound_payload_size { + message_payload.extra.pop(); + } + assert_eq!(message_payload.encoded_size() as u32, max_outbound_payload_size); + + let valid_message = + Pallet::::validate_message(TEST_LANE_ID, &message_payload) + .expect("validate_message has failed"); + Pallet::::send_message(valid_message); + }) +} + +#[test] +fn receive_messages_proof_works() { + run_test(|| { + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None), + 1, + REGULAR_PAYLOAD.declared_weight, + )); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).0.last_delivered_nonce(), 1); + + assert!(TestDeliveryPayments::is_reward_paid(1)); + }); +} + +#[test] +fn receive_messages_proof_updates_confirmed_message_nonce() { + run_test(|| { + // say we have received 10 messages && last confirmed message is 8 + InboundLanes::::insert( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 8, + relayers: vec![ + unrewarded_relayer(9, 9, TEST_RELAYER_A), + unrewarded_relayer(10, 10, TEST_RELAYER_B), + ] + .into(), + }, + ); + assert_eq!( + inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 10, + }, + ); + + // message proof includes outbound lane state with latest confirmed message updated to 9 + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof( + vec![message(11, REGULAR_PAYLOAD)], + Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }), + ), + 1, + REGULAR_PAYLOAD.declared_weight, + )); + + assert_eq!( + InboundLanes::::get(TEST_LANE_ID).0, + InboundLaneData { + last_confirmed_nonce: 9, + relayers: vec![ + unrewarded_relayer(10, 10, TEST_RELAYER_B), + unrewarded_relayer(11, 11, TEST_RELAYER_A) + ] + .into(), + }, + ); + assert_eq!( + inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 11, + }, + ); + }); +} + +#[test] +fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { + run_test(|| { + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + let mut declared_weight = REGULAR_PAYLOAD.declared_weight; + *declared_weight.ref_time_mut() -= 1; + + assert_noop!( + Pallet::::receive_messages_proof( + 
RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, + declared_weight, + ), + Error::::InsufficientDispatchWeight + ); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); + }); +} + +#[test] +fn receive_messages_proof_rejects_invalid_proof() { + run_test(|| { + let mut proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + proof.nonces_end += 1; + + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, + Weight::zero(), + ), + Error::::InvalidMessagesProof, + ); + }); +} + +#[test] +fn receive_messages_proof_rejects_proof_with_too_many_messages() { + run_test(|| { + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + u32::MAX, + Weight::zero(), + ), + Error::::TooManyMessagesInTheProof, + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_works() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + receive_messages_delivery_proof(); + + assert_eq!(OutboundLanes::::get(TEST_LANE_ID).latest_received_nonce, 1,); + }); +} + +#[test] +fn receive_messages_delivery_proof_rewards_relayers() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + + // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A + let single_message_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + ..Default::default() + }, + ); + let single_message_delivery_proof_size = single_message_delivery_proof.size(); + let result = Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + single_message_delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + ); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + TestWeightInfo::receive_messages_delivery_proof_weight( + &PreComputedSize(single_message_delivery_proof_size as _), + &UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + ) + ); + assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); + assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); + + // this reports delivery of both message 1 and message 2 => reward is paid only to + // TEST_RELAYER_B + let two_messages_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + let two_messages_delivery_proof_size = two_messages_delivery_proof.size(); + let result = Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + two_messages_delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 2, + }, + ); + assert_ok!(result); + // even though the pre-dispatch weight was for two messages, the actual weight is + // for single message only + assert_eq!( + result.unwrap().actual_weight.unwrap(), + TestWeightInfo::receive_messages_delivery_proof_weight( + &PreComputedSize(two_messages_delivery_proof_size as _), + &UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 
1, + ..Default::default() + }, + ) + ); + assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); + assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); + assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0))); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_invalid_proof() { + run_test(|| { + let mut proof = prepare_messages_delivery_proof(TEST_LANE_ID, Default::default()); + proof.lane = bp_messages::LaneId([42, 42, 42, 42]); + + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + Default::default(), + ), + Error::::InvalidMessagesDeliveryProof, + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { + run_test(|| { + // when number of relayers entries is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 2, + last_delivered_nonce: 2, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + + // when number of messages is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 1, + last_delivered_nonce: 2, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + + // when last delivered nonce is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 2, + last_delivered_nonce: 8, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + }); +} + +#[test] +fn receive_messages_accepts_single_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(1, REGULAR_PAYLOAD); + invalid_message.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![invalid_message], None), + 1, + Weight::zero(), /* weight may be zero in this case (all messages are + * improperly encoded) */ + ),); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1,); + }); +} + +#[test] +fn receive_messages_accepts_batch_with_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(2, REGULAR_PAYLOAD); + invalid_message.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof( + vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),], + None + ), + 3, + REGULAR_PAYLOAD.declared_weight + 
REGULAR_PAYLOAD.declared_weight, + ),); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 3,); + }); +} + +#[test] +fn actual_dispatch_weight_does_not_overflow() { + run_test(|| { + let message1 = message(1, message_payload(0, u64::MAX / 2)); + let message2 = message(2, message_payload(0, u64::MAX / 2)); + let message3 = message(3, message_payload(0, u64::MAX / 2)); + + let proof = prepare_messages_proof(vec![message1, message2, message3], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + // this may cause overflow if source chain storage is invalid + proof, + 3, + Weight::MAX, + ), + Error::::InsufficientDispatchWeight + ); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); + }); +} + +#[test] +fn ref_time_refund_from_receive_messages_proof_works() { + run_test(|| { + fn submit_with_unspent_weight( + nonce: MessageNonce, + unspent_weight: u64, + ) -> (Weight, Weight) { + let mut payload = REGULAR_PAYLOAD; + *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight; + let proof = prepare_messages_proof(vec![message(nonce, payload)], None); + let messages_count = 1; + let pre_dispatch_weight = + ::WeightInfo::receive_messages_proof_weight( + &*proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); + let result = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .expect("delivery has failed"); + let post_dispatch_weight = + result.actual_weight.expect("receive_messages_proof always returns Some"); + + // message delivery transactions are never free + assert_eq!(result.pays_fee, Pays::Yes); + + (pre_dispatch_weight, post_dispatch_weight) + } + + // when dispatch is returning `unspent_weight < declared_weight` + let (pre, post) = submit_with_unspent_weight(1, 1); + assert_eq!(post.ref_time(), pre.ref_time() - 1); + + // when dispatch is returning `unspent_weight = declared_weight` + let (pre, post) = submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time()); + assert_eq!(post.ref_time(), pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()); + + // when dispatch is returning `unspent_weight > declared_weight` + let (pre, post) = + submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1); + assert_eq!(post.ref_time(), pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()); + + // when there's no unspent weight + let (pre, post) = submit_with_unspent_weight(4, 0); + assert_eq!(post.ref_time(), pre.ref_time()); + + // when dispatch is returning `unspent_weight < declared_weight` + let (pre, post) = submit_with_unspent_weight(5, 1); + assert_eq!(post.ref_time(), pre.ref_time() - 1); + }); +} + +#[test] +fn proof_size_refund_from_receive_messages_proof_works() { + run_test(|| { + let max_entries = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize; + + // if there's maximal number of unrewarded relayer entries at the inbound lane, then + // `proof_size` is unchanged in post-dispatch weight + let proof = prepare_messages_proof(vec![message(101, REGULAR_PAYLOAD)], None); + let messages_count = 1; + let pre_dispatch_weight = + ::WeightInfo::receive_messages_proof_weight( + &*proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); + InboundLanes::::insert( + TEST_LANE_ID, + StoredInboundLaneData(InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: 42, + messages: DeliveredMessages { begin: 0, 
end: 100 } + }; + max_entries + ] + .into(), + last_confirmed_nonce: 0, + }), + ); + let post_dispatch_weight = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof.clone(), + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .unwrap() + .actual_weight + .unwrap(); + assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size()); + + // if count of unrewarded relayer entries is less than maximal, then some `proof_size` + // must be refunded + InboundLanes::::insert( + TEST_LANE_ID, + StoredInboundLaneData(InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: 42, + messages: DeliveredMessages { begin: 0, end: 100 } + }; + max_entries - 1 + ] + .into(), + last_confirmed_nonce: 0, + }), + ); + let post_dispatch_weight = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .unwrap() + .actual_weight + .unwrap(); + assert!( + post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(), + "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}", + post_dispatch_weight.proof_size(), + pre_dispatch_weight.proof_size(), + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected() +{ + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; + // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` returns + // `last_confirmed_nonce`; + // 3) it means that we're going to confirm delivery of messages 1..=1; + // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and numer of + // actually confirmed messages is `1`. 
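+		// In other words: the `UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }`
+		// declared below leaves `total_messages` at its default of `0`, while the inbound lane data in
+		// the proof implies that message `1` is being confirmed, so the call must fail with
+		// `ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected`.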
+ let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, + ), + Error::::ReceptionConfirmation( + ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected + ), + ); + }); +} + +#[test] +fn storage_keys_computed_properly() { + assert_eq!( + PalletOperatingMode::::storage_value_final_key().to_vec(), + bp_messages::storage_keys::operating_mode_key("Messages").0, + ); + + assert_eq!( + OutboundMessages::::storage_map_final_key(MessageKey { + lane_id: TEST_LANE_ID, + nonce: 42 + }), + bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, + ); + + assert_eq!( + OutboundLanes::::storage_map_final_key(TEST_LANE_ID), + bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, + ); + + assert_eq!( + InboundLanes::::storage_map_final_key(TEST_LANE_ID), + bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, + ); +} + +#[test] +fn inbound_message_details_works() { + run_test(|| { + assert_eq!( + Pallet::::inbound_message_data( + TEST_LANE_ID, + REGULAR_PAYLOAD.encode(), + OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 }, + ), + InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight }, + ); + }); +} + +#[test] +fn on_idle_callback_respects_remaining_weight() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 4, + relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)].into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 4, + total_messages: 4, + last_delivered_nonce: 4, + }, + )); + + // all 4 messages may be pruned now + assert_eq!(outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, 4); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + System::::set_block_number(2); + + // if passed wight is too low to do anything + let dbw = DbWeight::get(); + assert_eq!(Pallet::::on_idle(0, dbw.reads_writes(1, 1)), Weight::zero(),); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + + // if passed wight is enough to prune single message + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(1, 2)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + + // if passed wight is enough to prune two more messages + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(1, 3)), + dbw.reads_writes(1, 3), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 4); + + // if passed wight is enough to prune many messages + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 5); + }); +} + +#[test] +fn on_idle_callback_is_rotating_lanes_to_prune() { + run_test(|| { + // send + receive confirmation for lane 1 + send_regular_message(TEST_LANE_ID); + receive_messages_delivery_proof(); + // send + receive confirmation for lane 2 + 
send_regular_message(TEST_LANE_ID_2); + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID_2, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + + // nothing is pruned yet + assert_eq!(outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, 1); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().latest_received_nonce, + 1 + ); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 1 + ); + + // in block#2.on_idle lane messages of lane 1 are pruned + let dbw = DbWeight::get(); + System::::set_block_number(2); + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 1 + ); + + // in block#3.on_idle lane messages of lane 2 are pruned + System::::set_block_number(3); + + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 2 + ); + }); +} + +#[test] +fn outbound_message_from_unconfigured_lane_is_rejected() { + run_test(|| { + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID_3, ®ULAR_PAYLOAD,), + Error::::InactiveOutboundLane, + ); + }); +} + +#[test] +fn test_bridge_messages_call_is_correctly_defined() { + run_test(|| { + let account_id = 1; + let message_proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + let message_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: DeliveredMessages::new(1), + }] + .into(), + }, + ); + let unrewarded_relayer_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + last_delivered_nonce: 1, + ..Default::default() + }; + + let direct_receive_messages_proof_call = Call::::receive_messages_proof { + relayer_id_at_bridged_chain: account_id, + proof: message_proof.clone(), + messages_count: 1, + dispatch_weight: REGULAR_PAYLOAD.declared_weight, + }; + let indirect_receive_messages_proof_call = BridgeMessagesCall::< + AccountId, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, + >::receive_messages_proof { + relayer_id_at_bridged_chain: account_id, + proof: *message_proof, + messages_count: 1, + dispatch_weight: REGULAR_PAYLOAD.declared_weight, + }; + assert_eq!( + direct_receive_messages_proof_call.encode(), + indirect_receive_messages_proof_call.encode() + ); + + let direct_receive_messages_delivery_proof_call = + Call::::receive_messages_delivery_proof { + proof: message_delivery_proof.clone(), + relayers_state: unrewarded_relayer_state.clone(), + }; + let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< + AccountId, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, + >::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state: unrewarded_relayer_state, + }; + assert_eq!( + 
direct_receive_messages_delivery_proof_call.encode(), + indirect_receive_messages_delivery_proof_call.encode() + ); + }); +} + +generate_owned_bridge_module_tests!( + MessagesOperatingMode::Basic(BasicOperatingMode::Normal), + MessagesOperatingMode::Basic(BasicOperatingMode::Halted) +); + +#[test] +fn inbound_storage_extra_proof_size_bytes_works() { + fn relayer_entry() -> UnrewardedRelayer { + UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } } + } + + fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { + RuntimeInboundLaneStorage { + lane_id: Default::default(), + cached_data: Some(InboundLaneData { + relayers: vec![relayer_entry(); relayer_entries].into(), + last_confirmed_nonce: 0, + }), + _phantom: Default::default(), + } + } + + let max_entries = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize; + + // when we have exactly `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0); + + // when we have less than `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + assert_eq!( + storage(max_entries - 1).extra_proof_size_bytes(), + relayer_entry().encode().len() as u64 + ); + assert_eq!( + storage(max_entries - 2).extra_proof_size_bytes(), + 2 * relayer_entry().encode().len() as u64 + ); + + // when we have more than `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + // (shall not happen in practice) + assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0); +} + +#[test] +fn maybe_outbound_lanes_count_returns_correct_value() { + assert_eq!( + MaybeOutboundLanesCount::::get(), + Some(mock::ActiveOutboundLanes::get().len() as u32) + ); +} diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5bf7d56756079df8a5e469b9c50ba7607b65d983..72a06599b1655c52b9761c1b9af7c8d798631ddf 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` +//! HOSTNAME: `serban-ROG-Zephyrus`, CPU: `12th Gen Intel(R) Core(TM) i7-12700H` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -51,14 +51,13 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bridge_messages. 
pub trait WeightInfo { fn receive_single_message_proof() -> Weight; - fn receive_two_messages_proof() -> Weight; + fn receive_n_messages_proof(n: u32) -> Weight; fn receive_single_message_proof_with_outbound_lane_state() -> Weight; - fn receive_single_message_proof_1_kb() -> Weight; - fn receive_single_message_proof_16_kb() -> Weight; + fn receive_single_n_bytes_message_proof(n: u32) -> Weight; fn receive_delivery_proof_for_single_message() -> Weight; fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight; } /// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. @@ -82,56 +81,39 @@ impl WeightInfo for BridgeWeight { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. - Weight::from_parts(54_478_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_724 nanoseconds. + Weight::from_parts(40_650_000, 52673) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// The range of component `n` is `[1, 1004]`. 
/// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + /// The range of component `n` is `[1, 1004]`. + fn receive_n_messages_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. - Weight::from_parts(65_905_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 39_354 nanoseconds. + Weight::from_parts(29_708_543, 52673) + // Standard Error: 1_185 + .saturating_add(Weight::from_parts(7_648_787, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,12 +131,12 @@ impl WeightInfo for BridgeWeight { /// /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 45_578 nanoseconds. + Weight::from_parts(47_161_000, 52673) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -172,12 +154,16 @@ impl WeightInfo for BridgeWeight { /// /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { + /// + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. - Weight::from_parts(81_748_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_702 nanoseconds. + Weight::from_parts(41_040_143, 52673) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_174, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -198,16 +184,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:1) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. - Weight::from_parts(47_382_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 37_197 nanoseconds. 
+ Weight::from_parts(38_371_000, 3558) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -226,16 +217,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. - Weight::from_parts(45_451_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 38_684 nanoseconds. + Weight::from_parts(39_929_000, 3558) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -254,16 +250,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) + // Measured: `701` + // Estimated: `6126` + // Minimum execution time: 41_363 nanoseconds. + Weight::from_parts(42_621_000, 6126) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -280,15 +281,15 @@ impl WeightInfo for BridgeWeight { /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_925 nanoseconds. 
+ Weight::from_parts(39_617_000, 52673) + // Standard Error: 612 + .saturating_add(Weight::from_parts(372_813, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -312,33 +313,39 @@ impl WeightInfo for () { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. - Weight::from_parts(54_478_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_724 nanoseconds. + Weight::from_parts(40_650_000, 52673) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) + /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) + /// + /// The range of component `n` is `[1, 1004]`. + /// + /// The range of component `n` is `[1, 1004]`. + fn receive_n_messages_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 39_354 nanoseconds. + Weight::from_parts(29_708_543, 52673) + // Standard Error: 1_185 + .saturating_add(Weight::from_parts(7_648_787, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -358,10 +365,10 @@ impl WeightInfo for () { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. - Weight::from_parts(65_905_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 45_578 nanoseconds. 
+ Weight::from_parts(47_161_000, 52673) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -377,37 +384,20 @@ impl WeightInfo for () { /// /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) + /// The range of component `n` is `[1, 16384]`. /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. - Weight::from_parts(81_748_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_702 nanoseconds. + Weight::from_parts(41_040_143, 52673) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_174, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -428,16 +418,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:1) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. - Weight::from_parts(47_382_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 37_197 nanoseconds. 
+ Weight::from_parts(38_371_000, 3558) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -456,16 +451,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. - Weight::from_parts(45_451_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 38_684 nanoseconds. + Weight::from_parts(39_929_000, 3558) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -484,16 +484,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) + // Measured: `701` + // Estimated: `6126` + // Minimum execution time: 41_363 nanoseconds. + Weight::from_parts(42_621_000, 6126) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -510,15 +515,15 @@ impl WeightInfo for () { /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_925 nanoseconds. 
+ Weight::from_parts(39_617_000, 52673) + // Standard Error: 612 + .saturating_add(Weight::from_parts(372_813, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/bridges/modules/messages/src/weights_ext.rs b/bridges/modules/messages/src/weights_ext.rs index c12e04f692bf8304fb58d7c97ec50d1b860ccb56..7711e212efb06da0421f57b01ab4d1eef8b48f16 100644 --- a/bridges/modules/messages/src/weights_ext.rs +++ b/bridges/modules/messages/src/weights_ext.rs @@ -40,13 +40,6 @@ pub fn ensure_weights_are_correct() { // benchmarked using `MaxEncodedLen` approach and there are no components that cause additional // db reads - // verify `receive_messages_proof` weight components - assert_ne!(W::receive_messages_proof_overhead().ref_time(), 0); - assert_ne!(W::receive_messages_proof_overhead().proof_size(), 0); - // W::receive_messages_proof_messages_overhead(1).ref_time() may be zero because: - // the message processing code (`InboundLane::receive_message`) is minimal and may not be - // accounted by our benchmarks - assert_eq!(W::receive_messages_proof_messages_overhead(1).proof_size(), 0); // W::receive_messages_proof_outbound_lane_state_overhead().ref_time() may be zero because: // the outbound lane state processing code (`InboundLane::receive_state_update`) is minimal and // may not be accounted by our benchmarks @@ -86,6 +79,19 @@ pub fn ensure_weights_are_correct() { total_messages_in_delivery_proof_does_not_affect_proof_size::(); } +/// Ensure that we are able to dispatch maximal size messages. +pub fn ensure_maximal_message_dispatch( + max_incoming_message_size: u32, + max_incoming_message_dispatch_weight: Weight, +) { + let message_dispatch_weight = W::message_dispatch_weight(max_incoming_message_size); + assert!( + message_dispatch_weight.all_lte(max_incoming_message_dispatch_weight), + "Dispatch weight of maximal message {message_dispatch_weight:?} must be lower \ + than the hardcoded {max_incoming_message_dispatch_weight:?}", + ); +} + /// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. 
pub fn ensure_able_to_receive_message( max_extrinsic_size: u32, @@ -98,7 +104,8 @@ pub fn ensure_able_to_receive_message( max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); assert!( max_delivery_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery transaction {max_incoming_message_proof_size} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", + "Size of maximal message delivery transaction {max_incoming_message_proof_size} + \ + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", ); // verify that we're able to receive proof of maximal-size message with maximal dispatch weight @@ -297,13 +304,11 @@ pub trait WeightInfoExt: WeightInfo { dispatch_weight: Weight, ) -> Weight { // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_proof_overhead(); + let base_weight = Self::receive_n_messages_proof(messages_count); let transaction_overhead_from_runtime = Self::receive_messages_proof_overhead_from_runtime(); let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead(); - let messages_delivery_weight = - Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); let messages_dispatch_weight = dispatch_weight; // proof size overhead weight @@ -315,10 +320,9 @@ pub trait WeightInfoExt: WeightInfo { actual_proof_size.saturating_sub(expected_proof_size), ); - transaction_overhead + base_weight .saturating_add(transaction_overhead_from_runtime) .saturating_add(outbound_state_delivery_weight) - .saturating_add(messages_delivery_weight) .saturating_add(messages_dispatch_weight) .saturating_add(proof_size_overhead) } @@ -354,25 +358,6 @@ pub trait WeightInfoExt: WeightInfo { // Functions that are used by extrinsics weights formulas. - /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_single_message_proof().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving given a number of messages with - /// message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); - weight_of_two_messages_and_single_tx_overhead - .saturating_sub(weight_of_single_message_and_single_tx_overhead) - .saturating_mul(messages as _) - } - /// Returns weight that needs to be accounted when message delivery transaction /// (`receive_messages_proof`) is carrying outbound lane state proof. fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { @@ -426,9 +411,8 @@ pub trait WeightInfoExt: WeightInfo { /// is less than that cost). 
fn storage_proof_size_overhead(proof_size: u32) -> Weight { let proof_size_in_bytes = proof_size; - let byte_weight = (Self::receive_single_message_proof_16_kb() - - Self::receive_single_message_proof_1_kb()) / - (15 * 1024); + let byte_weight = Self::receive_single_n_bytes_message_proof(2) - + Self::receive_single_n_bytes_message_proof(1); proof_size_in_bytes * byte_weight } @@ -440,11 +424,9 @@ pub trait WeightInfoExt: WeightInfo { /// `receive_single_message_proof_with_dispatch` benchmark. See its requirements for /// details. fn message_dispatch_weight(message_size: u32) -> Weight { - // There may be a tiny overweight/underweight here, because we don't account how message - // size affects all steps before dispatch. But the effect should be small enough and we - // may ignore it. - Self::receive_single_message_proof_with_dispatch(message_size) - .saturating_sub(Self::receive_single_message_proof()) + let message_size_in_bytes = message_size; + Self::receive_single_n_bytes_message_proof_with_dispatch(message_size_in_bytes) + .saturating_sub(Self::receive_single_n_bytes_message_proof(message_size_in_bytes)) } } @@ -479,7 +461,7 @@ impl WeightInfoExt for crate::weights::BridgeWeight #[cfg(test)] mod tests { use super::*; - use crate::{mock::TestRuntime, weights::BridgeWeight}; + use crate::{tests::mock::TestRuntime, weights::BridgeWeight}; #[test] fn ensure_default_weights_are_correct() { diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index d3152f8d0a4aa9b6dc1c726441c5e139e08de162..cda0ee8106d5400c33e186265d672f1b9282dc0d 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -11,32 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-grandpa = { path = "../grandpa", default-features = false } +bp-header-chain = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } +pallet-bridge-grandpa = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } 
-bp-test-utils = { path = "../../primitives/test-utils" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-header-chain = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -54,7 +53,6 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", - "sp-trie/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/bridges/modules/parachains/src/benchmarking.rs b/bridges/modules/parachains/src/benchmarking.rs index 27e06a12a1d93486d93aa258afc1d7de4713df2c..92ece6d688cbea3bed2e5d489163f2416bef9bbc 100644 --- a/bridges/modules/parachains/src/benchmarking.rs +++ b/bridges/modules/parachains/src/benchmarking.rs @@ -22,7 +22,7 @@ use crate::{ }; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use bp_runtime::StorageProofSize; +use bp_runtime::UnverifiedStorageProofParams; use frame_benchmarking::{account, benchmarks_instance_pallet}; use frame_system::RawOrigin; use sp_std::prelude::*; @@ -38,7 +38,7 @@ pub trait Config: crate::Config { fn prepare_parachain_heads_proof( parachains: &[ParaId], parachain_head_size: u32, - proof_size: StorageProofSize, + proof_params: UnverifiedStorageProofParams, ) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>); } @@ -68,7 +68,7 @@ benchmarks_instance_pallet! { let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::Minimal(0), + UnverifiedStorageProofParams::default(), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) @@ -85,7 +85,7 @@ benchmarks_instance_pallet! { let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(1024), + UnverifiedStorageProofParams::from_db_size(1024), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) @@ -102,7 +102,7 @@ benchmarks_instance_pallet! 
{ let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(16 * 1024), + UnverifiedStorageProofParams::from_db_size(16 * 1024), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs index fe6b319205d41491ce2df36d8a1d112eb37f94b4..0f77eaf2c5a93d372cab8af0857f10fa40ca920f 100644 --- a/bridges/modules/parachains/src/call_ext.rs +++ b/bridges/modules/parachains/src/call_ext.rs @@ -289,7 +289,7 @@ mod tests { RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { at_relay_block: (num, [num as u8; 32].into()), parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: false, }) .check_obsolete_submit_parachain_heads() @@ -303,7 +303,7 @@ mod tests { RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { at_relay_block: (num, [num as u8; 32].into()), parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: true, }) .check_obsolete_submit_parachain_heads() diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs index d323aef3b22070d1db1e4709fe0dad8bf0360caf..e2c30ce9aecc1eb3b39fc588cc6386481f82fa7f 100644 --- a/bridges/modules/parachains/src/lib.rs +++ b/bridges/modules/parachains/src/lib.rs @@ -28,11 +28,12 @@ pub use weights::WeightInfo; pub use weights_ext::WeightInfoExt; use bp_header_chain::{HeaderChain, HeaderChainError}; -use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHeaderData}; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError}; +use bp_parachains::{ParaInfo, ParaStoredHeaderData}; +use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; +use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain}; use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound}; use pallet_bridge_grandpa::SubmitFinalityProofHelper; +use proofs::{ParachainsStorageProofAdapter, StorageProofAdapter}; use sp_std::{marker::PhantomData, vec::Vec}; #[cfg(feature = "runtime-benchmarks")] @@ -55,6 +56,7 @@ pub mod benchmarking; mod call_ext; #[cfg(test)] mod mock; +mod proofs; /// The target that will be used when publishing logs related to this pallet. 
pub const LOG_TARGET: &str = "runtime::bridge-parachains"; @@ -448,15 +450,15 @@ pub mod pallet { parachains.len() as _, ); - let mut is_updated_something = false; - let mut storage = GrandpaPalletOf::::storage_proof_checker( - relay_block_hash, - parachain_heads_proof.storage_proof, - ) - .map_err(Error::::HeaderChainStorageProof)?; + let mut storage: ParachainsStorageProofAdapter = + ParachainsStorageProofAdapter::try_new_with_verified_storage_proof( + relay_block_hash, + parachain_heads_proof.storage_proof, + ) + .map_err(Error::::HeaderChainStorageProof)?; for (parachain, parachain_head_hash) in parachains { - let parachain_head = match Self::read_parachain_head(&mut storage, parachain) { + let parachain_head = match storage.read_parachain_head(parachain) { Ok(Some(parachain_head)) => parachain_head, Ok(None) => { log::trace!( @@ -541,7 +543,6 @@ pub mod pallet { parachain_head_hash, )?; - is_updated_something = true; if is_free { free_parachain_heads = free_parachain_heads + 1; } @@ -572,7 +573,7 @@ pub mod pallet { // => treat this as an error // // (we can throw error here, because now all our calls are transactional) - storage.ensure_no_unused_nodes().map_err(|e| { + storage.ensure_no_unused_keys().map_err(|e| { Error::::HeaderChainStorageProof(HeaderChainError::StorageProof(e)) })?; @@ -633,16 +634,6 @@ pub mod pallet { ImportedParaHeads::::get(parachain, hash).map(|h| h.into_inner()) } - /// Read parachain head from storage proof. - fn read_parachain_head( - storage: &mut bp_runtime::StorageProofChecker, - parachain: ParaId, - ) -> Result, StorageProofError> { - let parachain_head_key = - parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); - storage.read_and_decode_value(parachain_head_key.0.as_ref()) - } - /// Try to update parachain head. 
pub(super) fn update_parachain_head( parachain: ParaId, @@ -801,6 +792,7 @@ impl, I: 'static, C: Parachain> HeaderChain pub fn initialize_for_benchmarks, I: 'static, PC: Parachain>( header: HeaderOf, ) { + use bp_polkadot_core::parachains::ParaHead; use bp_runtime::HeaderIdProvider; use sp_runtime::traits::Header; @@ -844,9 +836,10 @@ pub(crate) mod tests { use bp_parachains::{ BestParaHeadHash, BridgeParachainCall, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider, }; + use bp_polkadot_core::parachains::ParaHead; use bp_runtime::{ BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, + StorageMapKeyProvider, StorageProofError, }; use bp_test_utils::{ authority_list, generate_owned_bridge_module_tests, make_default_justification, diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index dbb62845392d5fd2f408744f4f8a2321ec4bd34d..c49b5939093c50a17fbf369533d5ff9c01b625bc 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -23,7 +23,7 @@ use frame_support::{ use sp_runtime::{ testing::H256, traits::{BlakeTwo256, Header as HeaderT}, - MultiSignature, + MultiSignature, StateVersion, }; use crate as pallet_bridge_parachains; @@ -60,6 +60,8 @@ impl Chain for Parachain1 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -87,6 +89,8 @@ impl Chain for Parachain2 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -114,6 +118,8 @@ impl Chain for Parachain3 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -142,6 +148,8 @@ impl Chain for BigParachain { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -222,7 +230,7 @@ impl pallet_bridge_parachains::benchmarking::Config<()> for TestRuntime { fn prepare_parachain_heads_proof( parachains: &[ParaId], _parachain_head_size: u32, - _proof_size: bp_runtime::StorageProofSize, + _proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( crate::RelayBlockNumber, crate::RelayBlockHash, @@ -256,38 +264,7 @@ impl Chain for TestBridgedChain { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; - fn max_extrinsic_size() -> u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithGrandpa for TestBridgedChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -#[derive(Debug)] -pub struct OtherBridgedChain; - -impl Chain for OtherBridgedChain { - const ID: ChainId = *b"obch"; - - type BlockNumber = u64; - type Hash = crate::RelayBlockHash; - type Hasher = crate::RelayBlockHasher; - type Header = sp_runtime::generic::Header; - - type AccountId = AccountId; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; fn max_extrinsic_size() -> u32 { unreachable!() @@ -298,7 +275,7 @@ impl Chain for OtherBridgedChain { } } -impl ChainWithGrandpa for OtherBridgedChain { +impl 
ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; diff --git a/bridges/modules/parachains/src/proofs.rs b/bridges/modules/parachains/src/proofs.rs new file mode 100644 index 0000000000000000000000000000000000000000..dcf22229f3423a9a75f4be68d1af2b1bf2e7ea1e --- /dev/null +++ b/bridges/modules/parachains/src/proofs.rs @@ -0,0 +1,81 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for parachain head proof verification. + +use crate::{Config, GrandpaPalletOf, RelayBlockHash, RelayBlockHasher}; +use bp_header_chain::{HeaderChain, HeaderChainError}; +use bp_parachains::parachain_head_storage_key_at_source; +use bp_polkadot_core::parachains::{ParaHead, ParaId}; +use bp_runtime::{RawStorageProof, StorageProofChecker, StorageProofError}; +use codec::Decode; +use frame_support::traits::Get; + +/// Abstraction over storage proof manipulation, hiding implementation details of actual storage +/// proofs. +pub trait StorageProofAdapter, I: 'static> { + /// Read and decode optional value from the proof. + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError>; + + /// Checks if each key was read. + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError>; + + /// Read parachain head from storage proof. + fn read_parachain_head( + &mut self, + parachain: ParaId, + ) -> Result, StorageProofError> { + let parachain_head_key = + parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); + self.read_and_decode_optional_value(¶chain_head_key) + } +} + +/// Actual storage proof adapter for parachain proofs. +pub type ParachainsStorageProofAdapter = RawStorageProofAdapter; + +/// A `StorageProofAdapter` implementation for raw storage proofs. +pub struct RawStorageProofAdapter, I: 'static> { + storage: StorageProofChecker, + _dummy: sp_std::marker::PhantomData<(T, I)>, +} + +impl, I: 'static> RawStorageProofAdapter { + /// Try to create a new instance of `RawStorageProofAdapter`. 
+ pub fn try_new_with_verified_storage_proof( + relay_block_hash: RelayBlockHash, + storage_proof: RawStorageProof, + ) -> Result { + GrandpaPalletOf::::verify_storage_proof(relay_block_hash, storage_proof) + .map(|storage| RawStorageProofAdapter:: { storage, _dummy: Default::default() }) + } +} + +impl, I: 'static> StorageProofAdapter for RawStorageProofAdapter { + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError> { + self.storage.read_and_decode_opt_value(key.as_ref()) + } + + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError> { + self.storage.ensure_no_unused_nodes() + } +} diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index abddc8768947006e574bf6bca4d2301c2047199a..1f92b7ff763c3f572efef1acf141a9eba0d3eb0e 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for pallet_bridge_parachains //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` +//! HOSTNAME: `serban-ROG-Zephyrus`, CPU: `12th Gen Intel(R) Core(TM) i7-12700H` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -86,14 +86,12 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { + fn submit_parachain_heads_with_n_parachains(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. - Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_211 nanoseconds. + Weight::from_parts(32_633_893, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -123,10 +121,10 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. - Weight::from_parts(39_252_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_830 nanoseconds. + Weight::from_parts(31_801_000, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -156,10 +154,10 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. - Weight::from_parts(63_581_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 44_736 nanoseconds. 
+ Weight::from_parts(45_296_000, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -193,14 +191,12 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { + fn submit_parachain_heads_with_n_parachains(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. - Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_211 nanoseconds. + Weight::from_parts(32_633_893, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -230,10 +226,10 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. - Weight::from_parts(39_252_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_830 nanoseconds. + Weight::from_parts(31_801_000, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -263,10 +259,10 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. - Weight::from_parts(63_581_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 44_736 nanoseconds. 
+ Weight::from_parts(45_296_000, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 08e1438d4f1946fb41f614b0e94c0ce6f1611fd5..27a28546afb482851040cbe16bd40071e86a70cb 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -11,31 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } +bp-messages = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +pallet-bridge-messages = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-runtime = { path = "../../primitives/runtime" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +bp-runtime = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index 7a3a0f9ea94cbe5768bf6ee8c850355193ea44f0..2c86ec01f5b915bc3b3b1e65c15adae7c45e1819 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -63,7 +63,7 @@ pub mod pallet { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Type of relayer reward. - type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen; + type Reward: AtLeast32BitUnsigned + Copy + Member + Parameter + MaxEncodedLen; /// Pay rewards scheme. type PaymentProcedure: PaymentProcedure; /// Stake and slash scheme. 
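// The payment-adapter change in the next file switches delivery-reward accounting from a
// `T::BridgedChainId` runtime parameter to the statically declared `Chain::ID` constant.
// A minimal sketch of building the rewards-account key from that constant, assuming the
// `bp_messages`, `bp_relayers` and `bp_runtime` re-exports shown in these hunks;
// `register_delivery_reward` is a hypothetical helper, not part of this diff.
use bp_messages::LaneId;
use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
use bp_runtime::Chain;

fn register_delivery_reward<C: Chain>(lane: LaneId) {
	// Account under which rewards for delivering messages towards the bridged chain are
	// registered - mirrors `RewardsAccountParams::new(lane_id, T::BridgedChain::ID,
	// RewardsAccountOwner::BridgedChain)` from the payment adapter hunk below.
	let _rewards_account =
		RewardsAccountParams::new(lane, C::ID, RewardsAccountOwner::BridgedChain);
}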
diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs index b2d9c676bddc493700a45fc957235dbb9516296b..f75c409aca4f3ef85fab748e4dc41af12b545562 100644 --- a/bridges/modules/relayers/src/payment_adapter.rs +++ b/bridges/modules/relayers/src/payment_adapter.rs @@ -23,6 +23,7 @@ use bp_messages::{ LaneId, MessageNonce, }; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::Chain; use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; use sp_arithmetic::traits::{Saturating, Zero}; use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeInclusive}; @@ -57,7 +58,7 @@ where relayers_rewards, RewardsAccountParams::new( lane_id, - T::BridgedChainId::get(), + T::BridgedChain::ID, RewardsAccountOwner::BridgedChain, ), DeliveryReward::get(), diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index b80240c974de9f5874e2825f5506885fea11ef3a..ec7c3b5628327f6cbb3d5b3920dba59521c6c209 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -11,31 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } # Bridge dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -sp-io = { path = "../../../substrate/primitives/io" } -sp-std = { path = "../../../substrate/primitives/std" } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 9b22770061a9a9ffd981f186de9231d7ff41cde9..092df477265fc0933180feaf355f174ca433df6f 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -11,33 +11,33 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } -bridge-runtime-common = { path = "../../bin/runtime-common", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } +bp-xcm-bridge-hub = { workspace = true } +pallet-bridge-messages = { workspace = true } +bridge-runtime-common = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } [dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-header-chain = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs index 4c09bce56d73eea717ad5149084e2ae337e48e87..df72e7a3c4fcc596cd89cc119c44aeb7c6915eaf 100644 --- a/bridges/modules/xcm-bridge-hub/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub/src/mock.rs @@ -20,23 +20,17 @@ use crate as pallet_xcm_bridge_hub; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - LaneId, -}; -use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, UnderlyingChainProvider}; -use bridge_runtime_common::{ - messages::{ - source::TargetHeaderChainAdapter, target::SourceHeaderChainAdapter, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, - }, - messages_xcm_extension::{SenderAndLane, XcmBlobHauler}, + ChainWithMessages, LaneId, MessageNonce, }; +use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, HashOf}; +use bridge_runtime_common::messages_xcm_extension::{SenderAndLane, XcmBlobHauler}; use codec::Encode; -use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::RuntimeDbWeight}; +use 
frame_support::{derive_impl, parameter_types, weights::RuntimeDbWeight}; use sp_core::H256; use sp_runtime::{ testing::Header as SubstrateHeader, traits::{BlakeTwo256, IdentityLookup}, - AccountId32, BuildStorage, + AccountId32, BuildStorage, StateVersion, }; use xcm::prelude::*; @@ -85,20 +79,17 @@ impl pallet_bridge_messages::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = TestMessagesWeights; - type BridgedChainId = (); type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = (); - type MaxUnconfirmedMessagesAtInboundLane = (); - type MaximalOutboundPayloadSize = ConstU32<2048>; type OutboundPayload = Vec; type InboundPayload = Vec; - type InboundRelayer = (); type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = (); type OnMessagesDelivered = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = TestMessageDispatch; + + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgedHeaderChain; } pub struct TestMessagesWeights; @@ -107,34 +98,27 @@ impl pallet_bridge_messages::WeightInfo for TestMessagesWeights { fn receive_single_message_proof() -> Weight { Weight::zero() } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + fn receive_n_messages_proof(_: u32) -> Weight { Weight::zero() } - fn receive_delivery_proof_for_single_message() -> Weight { + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { Weight::zero() } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + fn receive_single_n_bytes_message_proof(_: u32) -> Weight { Weight::zero() } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + fn receive_delivery_proof_for_single_message() -> Weight { Weight::zero() } - - fn receive_two_messages_proof() -> Weight { + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { Weight::zero() } - - fn receive_single_message_proof_1_kb() -> Weight { + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { Weight::zero() } - - fn receive_single_message_proof_16_kb() -> Weight { + fn receive_single_n_bytes_message_proof_with_dispatch(_: u32) -> Weight { Weight::zero() } - - fn receive_single_message_proof_with_dispatch(_: u32) -> Weight { - Weight::from_parts(1, 0) - } } impl pallet_bridge_messages::WeightInfoExt for TestMessagesWeights { @@ -198,9 +182,9 @@ impl XcmBlobHauler for TestXcmBlobHauler { type UncongestedMessage = (); } -pub struct ThisChain; +pub struct ThisUnderlyingChain; -impl Chain for ThisChain { +impl Chain for ThisUnderlyingChain { const ID: ChainId = *b"tuch"; type BlockNumber = u64; type Hash = H256; @@ -211,6 +195,8 @@ impl Chain for ThisChain { type Nonce = u64; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { u32::MAX } @@ -220,12 +206,19 @@ impl Chain for ThisChain { } } -pub struct BridgedChain; +impl ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + +pub struct BridgedUnderlyingChain; pub type BridgedHeaderHash = H256; pub type BridgedChainHeader = SubstrateHeader; -impl Chain for BridgedChain { - const ID: ChainId = *b"tuch"; +impl 
Chain for BridgedUnderlyingChain { + const ID: ChainId = *b"bgdc"; type BlockNumber = u64; type Hash = BridgedHeaderHash; type Hasher = BlakeTwo256; @@ -235,6 +228,8 @@ impl Chain for BridgedChain { type Nonce = u64; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 4096 } @@ -244,6 +239,12 @@ impl Chain for BridgedChain { } } +impl ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + /// Test message dispatcher. pub struct TestMessageDispatch; @@ -272,42 +273,15 @@ impl MessageDispatch for TestMessageDispatch { } } -pub struct WrappedThisChain; -impl UnderlyingChainProvider for WrappedThisChain { - type Chain = ThisChain; -} -impl ThisChainWithMessages for WrappedThisChain { - type RuntimeOrigin = RuntimeOrigin; -} - -pub struct WrappedBridgedChain; -impl UnderlyingChainProvider for WrappedBridgedChain { - type Chain = BridgedChain; -} -impl BridgedChainWithMessages for WrappedBridgedChain {} - pub struct BridgedHeaderChain; -impl bp_header_chain::HeaderChain for BridgedHeaderChain { +impl bp_header_chain::HeaderChain for BridgedHeaderChain { fn finalized_header_state_root( - _hash: HashOf, - ) -> Option> { + _hash: HashOf, + ) -> Option> { unreachable!() } } -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = WrappedThisChain; - type BridgedChain = WrappedBridgedChain; - type BridgedHeaderChain = BridgedHeaderChain; -} - /// Run pallet test. 
pub fn run_test(test: impl FnOnce() -> T) -> T { sp_io::TestExternalities::new( diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index bd68076ca48fc8ccc7bb8f48611083c0930731f7..404acaff30af252f6e5c52d9b28e8ccc72d542ce 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -12,23 +12,23 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } -serde = { default-features = false, features = ["alloc", "derive"], workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge Dependencies -bp-runtime = { path = "../runtime", default-features = false } +bp-runtime = { workspace = true } # Substrate Dependencies -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +binary-merkle-tree = { workspace = true } +sp-consensus-beefy = { workspace = true } +frame-support = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-mmr = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index def1f7ad4dfefb14c3f8459a3d2960c3890ddcf8..081bda479495f5bbd4599b4230d45f5c4e3c5e85 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -11,27 +11,27 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +finality-grandpa = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-runtime = { path = "../runtime", default-features = false } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } 
+sp-core = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-test-utils = { path = "../test-utils" } -hex = "0.4" -hex-literal = "0.4" +bp-test-utils = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index af2afb65a26a7f206fdbfcf22e20cb5100a8c95f..26295dee1801a127f455ed3288bd5232cb60bc10 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -46,7 +46,7 @@ pub mod storage_keys; pub enum HeaderChainError { /// Header with given hash is missing from the chain. UnknownHeader, - /// Storage proof related error. + /// Error generated by the `storage_proof` module. StorageProof(StorageProofError), } @@ -78,8 +78,9 @@ impl StoredHeaderDataBuilder for H { pub trait HeaderChain { /// Returns state (storage) root of given finalized header. fn finalized_header_state_root(header_hash: HashOf) -> Option>; + /// Get storage proof checker using finalized header. - fn storage_proof_checker( + fn verify_storage_proof( header_hash: HashOf, storage_proof: RawStorageProof, ) -> Result>, HeaderChainError> { @@ -409,7 +410,9 @@ mod tests { use super::*; use bp_runtime::ChainId; use frame_support::weights::Weight; - use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature}; + use sp_runtime::{ + testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature, StateVersion, + }; struct TestChain; @@ -425,6 +428,8 @@ mod tests { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 20337873c2e6abac5872807adf67557be60a46e8..4a9037342bcea66d813cdb79969c4ff3172f0bab 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -11,24 +11,24 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-runtime = { path = "../runtime", default-features = false } -bp-header-chain = { path = "../header-chain", default-features = false } +bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" -hex-literal = "0.4" +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff 
--git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index c3f79b3ee388c4584def56056f6cdf6328032e18..9984f8ac3222780ea2fd6af56dd896beb264b2d5 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -38,6 +38,9 @@ pub mod source_chain; pub mod storage_keys; pub mod target_chain; +/// Hard limit on message size that can be sent over the bridge. +pub const HARD_MESSAGE_SIZE_LIMIT: u32 = 64 * 1024; + /// Substrate-based chain with messaging support. pub trait ChainWithMessages: Chain { /// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is @@ -48,11 +51,63 @@ pub trait ChainWithMessages: Chain { const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str; /// Maximal number of unrewarded relayers in a single confirmation transaction at this - /// `ChainWithMessages`. + /// `ChainWithMessages`. Unrewarded means that the relayer has delivered messages, but + /// either confirmations haven't been delivered back to the source chain, or we haven't + /// received reward confirmations yet. + /// + /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep + /// in mind that the same relayer account may take several (non-consecutive) entries in this + /// set. const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce; /// Maximal number of unconfirmed messages in a single confirmation transaction at this - /// `ChainWithMessages`. + /// `ChainWithMessages`. Unconfirmed means that the + /// message has been delivered, but either confirmations haven't been delivered back to the + /// source chain, or we haven't received reward confirmations for these messages yet. + /// + /// This constant limits difference between last message from last entry of the + /// `InboundLaneData::relayers` and first message at the first entry. + /// + /// There is no point of making this parameter lesser than + /// `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX`, because then maximal number of relayer entries + /// will be limited by maximal number of messages. + /// + /// This value also represents maximal number of messages in single delivery transaction. + /// Transaction that is declaring more messages than this value, will be rejected. Even if + /// these messages are from different lanes. const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce; + + /// Return maximal dispatch weight of the message we're able to receive. + fn maximal_incoming_message_dispatch_weight() -> Weight { + // we leave 1/2 of `max_extrinsic_weight` for the delivery transaction itself + Self::max_extrinsic_weight() / 2 + } + + /// Return maximal size of the message we're able to receive. + fn maximal_incoming_message_size() -> u32 { + maximal_incoming_message_size(Self::max_extrinsic_size()) + } +} + +/// Return maximal size of the message the chain with `max_extrinsic_size` is able to receive. +pub fn maximal_incoming_message_size(max_extrinsic_size: u32) -> u32 { + // The maximal size of extrinsic at Substrate-based chain depends on the + // `frame_system::Config::MaximumBlockLength` and + // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that + // the lane won't stuck because message is too large to fit into delivery transaction. + // + // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not + // the message itself. The proof is always larger than the message. 
But unless chain state + // is enormously large, it should be several dozens/hundreds of bytes. The delivery + // transaction also contains signatures and signed extensions. Because of this, we reserve + // 1/3 of the the maximal extrinsic size for this data. + // + // **ANOTHER IMPORTANT NOTE**: large message means not only larger proofs and heavier + // proof verification, but also heavier message decoding and dispatch. So we have a hard + // limit of `64Kb`, which in practice limits the message size on all chains. Without this + // limit the **weight** (not the size) of the message will be higher than the + // `Self::maximal_incoming_message_dispatch_weight()`. + + sp_std::cmp::min(max_extrinsic_size / 3 * 2, HARD_MESSAGE_SIZE_LIMIT) } impl ChainWithMessages for T @@ -112,7 +167,19 @@ impl OperatingMode for MessagesOperatingMode { /// Lane id which implements `TypeId`. #[derive( - Clone, Copy, Decode, Default, Encode, Eq, Ord, PartialOrd, PartialEq, TypeInfo, MaxEncodedLen, + Clone, + Copy, + Decode, + Default, + Encode, + Eq, + Ord, + PartialOrd, + PartialEq, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, )] pub struct LaneId(pub [u8; 4]); @@ -435,7 +502,7 @@ where AccountId: sp_std::cmp::Ord, { // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain + // this loop is bounded by `T::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` on the bridged chain let mut relayers_rewards = RelayersRewards::new(); for entry in messages_relayers { let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); @@ -486,11 +553,11 @@ pub enum VerificationError { InvalidMessageWeight, /// Declared messages count doesn't match actual value. MessagesCountMismatch, - /// Error returned while reading/decoding message data from the storage proof. + /// Error returned while reading/decoding message data from the `VerifiedStorageProof`. MessageStorage(StorageProofError), /// The message is too large. MessageTooLarge, - /// Error returned while reading/decoding outbound lane data from the storage proof. + /// Error returned while reading/decoding outbound lane data from the `VerifiedStorageProof`. OutboundLaneStorage(StorageProofError), /// Storage proof related error. StorageProof(StorageProofError), diff --git a/bridges/primitives/messages/src/source_chain.rs b/bridges/primitives/messages/src/source_chain.rs index f4aefd9735583e265c3e44713f13f81ae63ba276..64f015bdb822eb25ff42a3fdb52bf6405a5424bf 100644 --- a/bridges/primitives/messages/src/source_chain.rs +++ b/bridges/primitives/messages/src/source_chain.rs @@ -16,11 +16,11 @@ //! Primitives of messages module, that are used on the source chain. -use crate::{InboundLaneData, LaneId, MessageNonce, VerificationError}; +use crate::{LaneId, MessageNonce, UnrewardedRelayer}; -use crate::UnrewardedRelayer; -use bp_runtime::Size; -use frame_support::Parameter; +use bp_runtime::{raw_storage_proof_size, RawStorageProof, Size}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, @@ -28,42 +28,36 @@ use sp_std::{ ops::RangeInclusive, }; -/// Number of messages, delivered by relayers. -pub type RelayersRewards = BTreeMap; - -/// Target chain API. Used by source chain to verify target chain proofs. +/// Messages delivery proof from the bridged chain. 
/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. +/// It contains everything required to prove that our (this chain) messages have been +/// delivered to the bridged (target) chain: /// -/// The `Payload` type here means the payload of the message that is sent from the -/// source chain to the target chain. The `AccountId` type here means the account -/// type used by the source chain. -pub trait TargetHeaderChain { - /// Proof that messages have been received by target chain. - type MessagesDeliveryProof: Parameter + Size; - - /// Verify message payload before we accept it. - /// - /// **CAUTION**: this is very important function. Incorrect implementation may lead - /// to stuck lanes and/or relayers loses. - /// - /// The proper implementation must ensure that the delivery-transaction with this - /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully mined. The most obvious incorrect implementation - /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer - /// will be unable to craft valid transaction => this (and all subsequent) messages will - /// never be delivered. - fn verify_message(payload: &Payload) -> Result<(), VerificationError>; - - /// Verify messages delivery proof and return lane && nonce of the latest received message. - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError>; +/// - hash of finalized header; +/// +/// - storage proof of the inbound lane state; +/// +/// - lane id. +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct FromBridgedChainMessagesDeliveryProof { + /// Hash of the bridge header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// Storage trie proof generated for [`Self::bridged_header_hash`]. + pub storage_proof: RawStorageProof, + /// Lane id of which messages were delivered and the proof is for. + pub lane: LaneId, +} + +impl Size for FromBridgedChainMessagesDeliveryProof { + fn size(&self) -> u32 { + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() + } } +/// Number of messages, delivered by relayers. +pub type RelayersRewards = BTreeMap; + /// Manages payments that are happening at the source chain during delivery confirmation /// transaction. pub trait DeliveryConfirmationPayments { @@ -143,28 +137,10 @@ pub trait MessagesBridge { fn send_message(message: Self::SendMessageArgs) -> SendMessageArtifacts; } -/// Structure that may be used in place of `TargetHeaderChain` and -/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. +/// Structure that may be used in place `MessageDeliveryAndDispatchPayment` on chains, +/// where outbound messages are forbidden. pub struct ForbidOutboundMessages; -/// Error message that is used in `ForbidOutboundMessages` implementation. 
-const ALL_OUTBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all outbound messages"; - -impl TargetHeaderChain for ForbidOutboundMessages { - type MessagesDeliveryProof = (); - - fn verify_message(_payload: &Payload) -> Result<(), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } - - fn verify_messages_delivery_proof( - _proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } -} - impl DeliveryConfirmationPayments for ForbidOutboundMessages { type Error = &'static str; diff --git a/bridges/primitives/messages/src/target_chain.rs b/bridges/primitives/messages/src/target_chain.rs index 388ce16ccdc06d3e2c42c3a094aae4d6180a0d09..74fecb9d9f0d89420d2ca54a4356e23c9130e614 100644 --- a/bridges/primitives/messages/src/target_chain.rs +++ b/bridges/primitives/messages/src/target_chain.rs @@ -16,17 +16,48 @@ //! Primitives of messages module, that are used on the target chain. -use crate::{ - LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, VerificationError, -}; +use crate::{LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData}; -use bp_runtime::{messages::MessageDispatchResult, Size}; +use bp_runtime::{messages::MessageDispatchResult, raw_storage_proof_size, RawStorageProof, Size}; use codec::{Decode, Encode, Error as CodecError}; -use frame_support::{weights::Weight, Parameter}; +use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, prelude::*}; +/// Messages proof from bridged chain. +/// +/// It contains everything required to prove that bridged (source) chain has +/// sent us some messages: +/// +/// - hash of finalized header; +/// +/// - storage proof of messages and (optionally) outbound lane state; +/// +/// - lane id; +/// +/// - nonces (inclusive range) of messages which are included in this proof. +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct FromBridgedChainMessagesProof { + /// Hash of the finalized bridged header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// A storage trie proof of messages being delivered. + pub storage_proof: RawStorageProof, + /// Messages in this proof are sent over this lane. + pub lane: LaneId, + /// Nonce of the first message being delivered. + pub nonces_start: MessageNonce, + /// Nonce of the last message being delivered. + pub nonces_end: MessageNonce, +} + +impl Size for FromBridgedChainMessagesProof { + fn size(&self) -> u32 { + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() + } +} + /// Proved messages from the source chain. pub type ProvedMessages = BTreeMap>; @@ -55,33 +86,6 @@ pub struct DispatchMessage { pub data: DispatchMessageData, } -/// Source chain API. Used by target chain, to verify source chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait SourceHeaderChain { - /// Proof that messages are sent from source chain. This may also include proof - /// of corresponding outbound lane states. 
- type MessagesProof: Parameter + Size; - - /// Verify messages proof and return proved messages. - /// - /// Returns error if either proof is incorrect, or the number of messages in the proof - /// is not matching the `messages_count`. - /// - /// Messages vector is required to be sorted by nonce within each lane. Out-of-order - /// messages will be rejected. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError>; -} - /// Called when inbound message is received. pub trait MessageDispatch { /// Decoded message payload type. Valid message may contain invalid payload. In this case @@ -167,32 +171,11 @@ impl DeliveryPayments for () { } } -/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, +/// Structure that may be used in place of `MessageDispatch` on chains, /// where inbound messages are forbidden. -pub struct ForbidInboundMessages( - PhantomData<(MessagesProof, DispatchPayload)>, -); - -/// Error message that is used in `ForbidInboundMessages` implementation. -const ALL_INBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all inbound messages"; - -impl SourceHeaderChain - for ForbidInboundMessages -{ - type MessagesProof = MessagesProof; - - fn verify_messages_proof( - _proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - Err(VerificationError::Other(ALL_INBOUND_MESSAGES_REJECTED)) - } -} +pub struct ForbidInboundMessages(PhantomData); -impl MessageDispatch - for ForbidInboundMessages -{ +impl MessageDispatch for ForbidInboundMessages { type DispatchPayload = DispatchPayload; type DispatchLevelResult = (); diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index a6e71876cefbb3963ef1923469d641281cda00dc..173380c8224d2855e2022cc6b6f9266fc8094ecd 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -11,22 +11,22 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] 
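// The `FromBridgedChainMessagesProof` and `FromBridgedChainMessagesDeliveryProof` structs
// introduced in bridges/primitives/messages/src/{target_chain,source_chain}.rs above bundle
// the finalized bridged header hash, the raw storage proof and the lane (plus the delivered
// nonce range for message proofs), and their `Size` implementations delegate to
// `bp_runtime::raw_storage_proof_size`. A minimal construction sketch, assuming the
// `bp_messages::target_chain` path, a generic header-hash parameter and a
// `Default`-constructible `RawStorageProof` as used elsewhere in this diff; all values are
// placeholders and both helpers are hypothetical.
use bp_messages::{target_chain::FromBridgedChainMessagesProof, LaneId};
use bp_runtime::Size;
use sp_core::H256;

fn empty_messages_proof() -> FromBridgedChainMessagesProof<H256> {
	FromBridgedChainMessagesProof {
		bridged_header_hash: H256::zero(),
		storage_proof: Default::default(),
		lane: LaneId(*b"test"),
		nonces_start: 1,
		nonces_end: 42,
	}
}

fn proof_size_for_weighing(proof: &FromBridgedChainMessagesProof<H256>) -> u32 {
	// `Size::size()` is what the runtime uses to account for proof-size related weight.
	proof.size()
}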
diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index d4b2f503e9e2ca92c095649f8aa36741d02c8037..acae2f431bf20aa8babf57e586cd4bb726fe2ec8 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -11,26 +11,26 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +parity-util-mem = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" +hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/polkadot-core/src/parachains.rs b/bridges/primitives/polkadot-core/src/parachains.rs index 433cd2845abd9ae95687d6f1d024765ee3bd2ebb..d54ee108386edf50865c500eb1062c24b01c0c3e 100644 --- a/bridges/primitives/polkadot-core/src/parachains.rs +++ b/bridges/primitives/polkadot-core/src/parachains.rs @@ -22,7 +22,7 @@ //! parachains. Having pallets that are referencing polkadot, would mean that there may //! be two versions of polkadot crates included in the runtime. Which is bad. 
-use bp_runtime::{RawStorageProof, Size}; +use bp_runtime::{raw_storage_proof_size, RawStorageProof, Size}; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::Hasher; @@ -96,11 +96,7 @@ pub struct ParaHeadsProof { impl Size for ParaHeadsProof { fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() } } diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 5081dddce1e61eccbae540f665257e122d777dd6..3448e8a4096339966023d0f5ddd0e158380ab12a 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -11,23 +11,23 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } # Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" -hex-literal = "0.4" +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index 2a9ef6a8e1e9aba999ea90045447f7a87fb3813b..436f33db4008013572592939b63c0e02e92b8e99 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -140,8 +140,8 @@ pub struct RelayerRewardsKeyProvider(PhantomData<(AccountId, impl StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider where - AccountId: Codec + EncodeLike, - Reward: Codec + EncodeLike, + AccountId: 'static + Codec + EncodeLike + Send + Sync, + Reward: 'static + Codec + EncodeLike + Send + Sync, { const MAP_NAME: &'static str = "RelayerRewards"; diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index ac65ad538b4988c71e59d081cba46d47ebdc7c39..117409b37b9457f93194585b12aabd0de00d5c7f 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -11,28 +11,28 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -hash-db = { version = "0.16.0", default-features = false } -impl-trait-for-tuples = "0.2.2" +codec = { workspace = true } +hash-db = { workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } +num-traits = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -trie-db = { version = "0.29.0", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-state-machine = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +trie-db = { workspace = true } [dev-dependencies] -hex-literal = "0.4" +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] @@ -53,3 +53,4 @@ std = [ "sp-trie/std", "trie-db/std", ] +test-helpers = [] diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 369386e41b0cf9f2d911ca40fc9e6ccfb3de6e52..0db4eac79a7500bf295756efaef514f5a4429e6c 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -24,7 +24,7 @@ use sp_runtime::{ AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify, }, - FixedPointOperand, + FixedPointOperand, StateVersion, }; use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; @@ -196,6 +196,10 @@ pub trait Chain: Send + Sync + 'static { /// Signature type, used on this chain. type Signature: Parameter + Verify; + /// Version of the state implementation used by this chain. This is directly related with the + /// `TrieLayout` configuration used by the storage. + const STATE_VERSION: StateVersion; + /// Get the maximum size (in bytes) of a Normal extrinsic at this chain. fn max_extrinsic_size() -> u32; /// Get the maximum weight (compute time) that a Normal extrinsic at this chain can use. 
@@ -223,6 +227,8 @@ where type Nonce = ::Nonce; type Signature = ::Signature; + const STATE_VERSION: StateVersion = ::STATE_VERSION; + fn max_extrinsic_size() -> u32 { ::max_extrinsic_size() } diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 5daba0351ad48f0ae39b870990b6f5ccea1bec1e..8f5040ad9a1bee5efad995b4e62d5e883dfb0c9f 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -40,15 +40,18 @@ pub use chain::{ }; pub use frame_support::storage::storage_prefix as storage_value_final_key; use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; +#[cfg(feature = "std")] +pub use storage_proof::craft_valid_storage_proof; +#[cfg(feature = "test-helpers")] pub use storage_proof::{ - record_all_keys as record_all_trie_keys, Error as StorageProofError, - ProofSize as StorageProofSize, RawStorageProof, StorageProofChecker, + grow_storage_proof, grow_storage_value, record_all_keys as record_all_trie_keys, + UnverifiedStorageProofParams, +}; +pub use storage_proof::{ + raw_storage_proof_size, RawStorageProof, StorageProofChecker, StorageProofError, }; pub use storage_types::BoundedStorageValue; -#[cfg(feature = "std")] -pub use storage_proof::craft_valid_storage_proof; - pub mod extensions; pub mod messages; @@ -255,9 +258,9 @@ pub trait StorageMapKeyProvider { /// The same as `StorageMap::Hasher1`. type Hasher: StorageHasher; /// The same as `StorageMap::Key1`. - type Key: FullCodec; + type Key: FullCodec + Send + Sync; /// The same as `StorageMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageMap::storage_map_final_key`. @@ -277,13 +280,13 @@ pub trait StorageDoubleMapKeyProvider { /// The same as `StorageDoubleMap::Hasher1`. type Hasher1: StorageHasher; /// The same as `StorageDoubleMap::Key1`. - type Key1: FullCodec; + type Key1: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Hasher2`. type Hasher2: StorageHasher; /// The same as `StorageDoubleMap::Key2`. - type Key2: FullCodec; + type Key2: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`. @@ -461,38 +464,6 @@ macro_rules! generate_static_str_provider { }; } -/// Error message that is only displayable in `std` environment. -#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct StrippableError { - _phantom_data: sp_std::marker::PhantomData, - #[codec(skip)] - #[cfg(feature = "std")] - message: String, -} - -impl From for StrippableError { - fn from(_err: T) -> Self { - Self { - _phantom_data: Default::default(), - #[cfg(feature = "std")] - message: format!("{:?}", _err), - } - } -} - -impl Debug for StrippableError { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str(&self.message) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str("Stripped error") - } -} - /// A trait defining helper methods for `RangeInclusive` (start..=end) pub trait RangeInclusiveExt { /// Computes the length of the `RangeInclusive`, checking for underflow and overflow. 
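With `StrippableError` gone, storage-proof failures now surface through the re-exported `StorageProofError`, while the test-only helpers (`grow_storage_proof`, `grow_storage_value`, `record_all_trie_keys`, `UnverifiedStorageProofParams`) sit behind the new `test-helpers` feature. The sketch below walks through the reworked verification flow as exercised by the crate's own tests later in this diff; the `sp_core::Blake2Hasher` choice and exact import paths are assumptions on my part, so treat it as an outline of the API rather than a drop-in snippet.

```rust
// Outline of the proof-checking flow (based on the tests in this diff).
use bp_runtime::{craft_valid_storage_proof, StorageProofChecker, StorageProofError};

fn verify_example() -> Result<(), StorageProofError> {
    // `craft_valid_storage_proof` is the std-only helper re-exported above.
    let (root, proof) = craft_valid_storage_proof();

    // `new` now rejects duplicate trie nodes up front instead of exposing a
    // separate `DuplicateNodesInProof` error variant.
    let mut checker = StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof)?;

    // Read every key covered by the crafted proof (mirroring the tests below);
    // a key outside the proof would yield `StorageProofError::StorageValueUnavailable`.
    assert_eq!(checker.read_value(b"key1")?, Some(b"value1".to_vec()));
    assert_eq!(checker.read_value(b"key2")?, Some(b"value2".to_vec()));
    checker.read_value(b"key4")?;
    assert_eq!(checker.read_value(b"key22")?, None);

    // All proof nodes have now been touched; untouched nodes would make this
    // return `StorageProofError::UnusedKey`.
    checker.ensure_no_unused_nodes()
}
```

Note that `ensure_no_unused_nodes` is deliberately the last step: it consumes the checker, so all reads have to happen before the proof is declared fully used.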
diff --git a/bridges/primitives/runtime/src/storage_proof.rs b/bridges/primitives/runtime/src/storage_proof.rs index 1b706aa66c16fc73a21ce83f550bea8a8fe128e5..7bfa0d6fde01186f1fe09e66dd3ba1accf286ce5 100644 --- a/bridges/primitives/runtime/src/storage_proof.rs +++ b/bridges/primitives/runtime/src/storage_proof.rs @@ -14,33 +14,91 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Logic for checking Substrate storage proofs. +//! Logic for working with storage proofs. -use crate::StrippableError; -use codec::{Decode, Encode}; use frame_support::PalletError; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use scale_info::TypeInfo; -use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; +use sp_core::RuntimeDebug; +use sp_std::{default::Default, vec::Vec}; use sp_trie::{ - read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration, - TrieDBBuilder, TrieError, TrieHash, + accessed_nodes_tracker::AccessedNodesTracker, read_trie_value, LayoutV1, MemoryDB, StorageProof, }; +use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use scale_info::TypeInfo; +#[cfg(feature = "test-helpers")] +use sp_trie::{recorder_ext::RecorderExt, Recorder, TrieDBBuilder, TrieError, TrieHash}; +#[cfg(feature = "test-helpers")] +use trie_db::{Trie, TrieConfiguration, TrieDBMut}; + +/// Errors that can occur when interacting with `UnverifiedStorageProof` and `VerifiedStorageProof`. +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] +pub enum StorageProofError { + /// Call to `generate_trie_proof()` failed. + UnableToGenerateTrieProof, + /// Call to `verify_trie_proof()` failed. + InvalidProof, + /// The `Vec` entries weren't sorted as expected. + UnsortedEntries, + /// The provided key wasn't found. + UnavailableKey, + /// The value associated to the provided key is `None`. + EmptyVal, + /// Error decoding value associated to a provided key. + DecodeError, + /// At least one key or node wasn't read. + UnusedKey, + + /// Expected storage root is missing from the proof. (for non-compact proofs) + StorageRootMismatch, + /// Unable to reach expected storage value using provided trie nodes. (for non-compact proofs) + StorageValueUnavailable, + /// The proof contains duplicate nodes. (for non-compact proofs) + DuplicateNodes, +} + +impl From for StorageProofError { + fn from(e: sp_trie::StorageProofError) -> Self { + match e { + sp_trie::StorageProofError::DuplicateNodes => StorageProofError::DuplicateNodes, + } + } +} + +impl From for StorageProofError { + fn from(e: sp_trie::accessed_nodes_tracker::Error) -> Self { + match e { + sp_trie::accessed_nodes_tracker::Error::UnusedNodes => StorageProofError::UnusedKey, + } + } +} + /// Raw storage proof type (just raw trie nodes). -pub type RawStorageProof = Vec>; +pub type RawStorageProof = sp_trie::RawStorageProof; + +/// Calculates size for `RawStorageProof`. +pub fn raw_storage_proof_size(raw_storage_proof: &RawStorageProof) -> usize { + raw_storage_proof + .iter() + .fold(0usize, |sum, node| sum.saturating_add(node.len())) +} -/// Storage proof size requirements. +/// Storage values size requirements. /// /// This is currently used by benchmarks when generating storage proofs. -#[derive(Clone, Copy, Debug)] -pub enum ProofSize { - /// The proof is expected to be minimal. If value size may be changed, then it is expected to - /// have given size. 
- Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing value that is - /// stored in the trie. - HasLargeLeaf(u32), +#[cfg(feature = "test-helpers")] +#[derive(Clone, Copy, Debug, Default)] +pub struct UnverifiedStorageProofParams { + /// Expected storage proof size in bytes. + pub db_size: Option, +} + +#[cfg(feature = "test-helpers")] +impl UnverifiedStorageProofParams { + /// Make storage proof parameters that require proof of at least `db_size` bytes. + pub fn from_db_size(db_size: u32) -> Self { + Self { db_size: Some(db_size) } + } } /// This struct is used to read storage values from a subset of a Merklized database. The "proof" @@ -51,10 +109,9 @@ pub struct StorageProofChecker where H: Hasher, { - proof_nodes_count: usize, root: H::Out, db: MemoryDB, - recorder: Recorder>, + accessed_nodes_tracker: AccessedNodesTracker, } impl StorageProofChecker @@ -64,99 +121,161 @@ where /// Constructs a new storage proof checker. /// /// This returns an error if the given proof is invalid with respect to the given root. - pub fn new(root: H::Out, proof: RawStorageProof) -> Result { - // 1. we don't want extra items in the storage proof - // 2. `StorageProof` is storing all trie nodes in the `BTreeSet` - // - // => someone could simply add duplicate items to the proof and we won't be - // able to detect that by just using `StorageProof` - // - // => let's check it when we are converting our "raw proof" into `StorageProof` - let proof_nodes_count = proof.len(); - let proof = StorageProof::new(proof); - if proof_nodes_count != proof.iter_nodes().count() { - return Err(Error::DuplicateNodesInProof) - } + pub fn new(root: H::Out, proof: RawStorageProof) -> Result { + let proof = StorageProof::new_with_duplicate_nodes_check(proof)?; + + let recorder = AccessedNodesTracker::new(proof.len()); let db = proof.into_memory_db(); if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch) + return Err(StorageProofError::StorageRootMismatch) } - let recorder = Recorder::default(); - let checker = StorageProofChecker { proof_nodes_count, root, db, recorder }; - Ok(checker) + Ok(StorageProofChecker { root, db, accessed_nodes_tracker: recorder }) } /// Returns error if the proof has some nodes that are left intact by previous `read_value` /// calls. - pub fn ensure_no_unused_nodes(mut self) -> Result<(), Error> { - let visited_nodes = self - .recorder - .drain() - .into_iter() - .map(|record| record.data) - .collect::>(); - let visited_nodes_count = visited_nodes.len(); - if self.proof_nodes_count == visited_nodes_count { - Ok(()) - } else { - Err(Error::UnusedNodesInTheProof) - } + pub fn ensure_no_unused_nodes(self) -> Result<(), StorageProofError> { + self.accessed_nodes_tracker.ensure_no_unused_nodes().map_err(Into::into) } /// Reads a value from the available subset of storage. If the value cannot be read due to an /// incomplete or otherwise invalid proof, this function returns an error. - pub fn read_value(&mut self, key: &[u8]) -> Result>, Error> { + pub fn read_value(&mut self, key: &[u8]) -> Result>, StorageProofError> { // LayoutV1 or LayoutV0 is identical for proof that only read values. 
- read_trie_value::, _>(&self.db, &self.root, key, Some(&mut self.recorder), None) - .map_err(|_| Error::StorageValueUnavailable) + read_trie_value::, _>( + &self.db, + &self.root, + key, + Some(&mut self.accessed_nodes_tracker), + None, + ) + .map_err(|_| StorageProofError::StorageValueUnavailable) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, this function returns an error. If value is /// read, but decoding fails, this function returns an error. - pub fn read_and_decode_value(&mut self, key: &[u8]) -> Result, Error> { + pub fn read_and_decode_value( + &mut self, + key: &[u8], + ) -> Result, StorageProofError> { self.read_value(key).and_then(|v| { - v.map(|v| T::decode(&mut &v[..]).map_err(|e| Error::StorageValueDecodeFailed(e.into()))) - .transpose() + v.map(|v| { + T::decode(&mut &v[..]).map_err(|e| { + log::warn!(target: "bridge-storage-proofs", "read_and_decode_value error: {e:?}"); + StorageProofError::DecodeError + }) + }) + .transpose() }) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, or if the value is `None`, this function /// returns an error. If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_mandatory_value(&mut self, key: &[u8]) -> Result { - self.read_and_decode_value(key)?.ok_or(Error::StorageValueEmpty) + pub fn read_and_decode_mandatory_value( + &mut self, + key: &[u8], + ) -> Result { + self.read_and_decode_value(key)?.ok_or(StorageProofError::EmptyVal) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, this function returns `Ok(None)`. /// If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_opt_value(&mut self, key: &[u8]) -> Result, Error> { + pub fn read_and_decode_opt_value( + &mut self, + key: &[u8], + ) -> Result, StorageProofError> { match self.read_and_decode_value(key) { Ok(outbound_lane_data) => Ok(outbound_lane_data), - Err(Error::StorageValueUnavailable) => Ok(None), + Err(StorageProofError::StorageValueUnavailable) => Ok(None), Err(e) => Err(e), } } } -/// Storage proof related errors. -#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, Debug, TypeInfo)] -pub enum Error { - /// Duplicate trie nodes are found in the proof. - DuplicateNodesInProof, - /// Unused trie nodes are found in the proof. - UnusedNodesInTheProof, - /// Expected storage root is missing from the proof. - StorageRootMismatch, - /// Unable to reach expected storage value using provided trie nodes. - StorageValueUnavailable, - /// The storage value is `None`. - StorageValueEmpty, - /// Failed to decode storage value. - StorageValueDecodeFailed(StrippableError), +/// Add extra data to the storage value so that it'll be of given size. +#[cfg(feature = "test-helpers")] +pub fn grow_storage_value(mut value: Vec, params: &UnverifiedStorageProofParams) -> Vec { + if let Some(db_size) = params.db_size { + if db_size as usize > value.len() { + value.extend(sp_std::iter::repeat(42u8).take(db_size as usize - value.len())); + } + } + value +} + +/// Insert values in the provided trie at common-prefix keys in order to inflate the resulting +/// storage proof. +/// +/// This function can add at most 15 common-prefix keys per prefix nibble (4 bits). 
+/// Each such key adds about 33 bytes (a node) to the proof. +#[cfg(feature = "test-helpers")] +pub fn grow_storage_proof( + trie: &mut TrieDBMut, + prefix: Vec, + num_extra_nodes: usize, +) { + use sp_trie::TrieMut; + + let mut added_nodes = 0; + for i in 0..prefix.len() { + let mut prefix = prefix[0..=i].to_vec(); + // 1 byte has 2 nibbles (4 bits each) + let first_nibble = (prefix[i] & 0xf0) >> 4; + let second_nibble = prefix[i] & 0x0f; + + // create branches at the 1st nibble + for branch in 1..=15 { + if added_nodes >= num_extra_nodes { + return + } + + // create branches at the 1st nibble + prefix[i] = (first_nibble.wrapping_add(branch) % 16) << 4; + trie.insert(&prefix, &[0; 32]) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + added_nodes += 1; + } + + // create branches at the 2nd nibble + for branch in 1..=15 { + if added_nodes >= num_extra_nodes { + return + } + + prefix[i] = (first_nibble << 4) | (second_nibble.wrapping_add(branch) % 16); + trie.insert(&prefix, &[0; 32]) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + added_nodes += 1; + } + } + + assert_eq!(added_nodes, num_extra_nodes) +} + +/// Record all keys for a given root. +#[cfg(feature = "test-helpers")] +pub fn record_all_keys( + db: &DB, + root: &TrieHash, +) -> Result>> +where + DB: hash_db::HashDBRef, +{ + let mut recorder = Recorder::::new(); + let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); + for x in trie.iter()? { + let (key, _) = x?; + trie.get(&key)?; + } + + Ok(recorder.into_raw_storage_proof()) } /// Return valid storage proof and state root. @@ -170,7 +289,7 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) { // construct storage proof let backend = >::from(( - vec![ + sp_std::vec![ (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), @@ -180,41 +299,15 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) { ], state_version, )); - let root = backend.storage_root(std::iter::empty(), state_version).0; + let root = backend.storage_root(sp_std::iter::empty(), state_version).0; let proof = prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]]).unwrap(); (root, proof.into_nodes().into_iter().collect()) } -/// Record all keys for a given root. -pub fn record_all_keys( - db: &DB, - root: &TrieHash, -) -> Result>> -where - DB: hash_db::HashDBRef, -{ - let mut recorder = Recorder::::new(); - let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); - for x in trie.iter()? 
{ - let (key, _) = x?; - trie.get(&key)?; - } - - // recorder may record the same trie node multiple times and we don't want duplicate nodes - // in our proofs => let's deduplicate it by collecting to the BTreeSet first - Ok(recorder - .drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect::>() - .into_iter() - .collect()) -} - #[cfg(test)] -pub mod tests { +pub mod tests_for_storage_proof_checker { use super::*; use codec::Encode; @@ -228,29 +321,21 @@ pub mod tests { assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); assert_eq!(checker.read_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8).encode()))); - assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); + assert_eq!( + checker.read_value(b"key11111"), + Err(StorageProofError::StorageValueUnavailable) + ); assert_eq!(checker.read_value(b"key22"), Ok(None)); assert_eq!(checker.read_and_decode_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8))),); assert!(matches!( checker.read_and_decode_value::<[u8; 64]>(b"key4"), - Err(Error::StorageValueDecodeFailed(_)), + Err(StorageProofError::DecodeError), )); // checking proof against invalid commitment fails assert_eq!( >::new(sp_core::H256::random(), proof).err(), - Some(Error::StorageRootMismatch) - ); - } - - #[test] - fn proof_with_duplicate_items_is_rejected() { - let (root, mut proof) = craft_valid_storage_proof(); - proof.push(proof.first().unwrap().clone()); - - assert_eq!( - StorageProofChecker::::new(root, proof).map(drop), - Err(Error::DuplicateNodesInProof), + Some(StorageProofError::StorageRootMismatch) ); } @@ -260,13 +345,13 @@ pub mod tests { let mut checker = StorageProofChecker::::new(root, proof.clone()).unwrap(); - checker.read_value(b"key1").unwrap(); + checker.read_value(b"key1").unwrap().unwrap(); checker.read_value(b"key2").unwrap(); checker.read_value(b"key4").unwrap(); checker.read_value(b"key22").unwrap(); assert_eq!(checker.ensure_no_unused_nodes(), Ok(())); let checker = StorageProofChecker::::new(root, proof).unwrap(); - assert_eq!(checker.ensure_no_unused_nodes(), Err(Error::UnusedNodesInTheProof)); + assert_eq!(checker.ensure_no_unused_nodes(), Err(StorageProofError::UnusedKey)); } } diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 99f5ee0d1aee4528f64028bbb4ce089cfb6f4c44..5e6e3893393534aa828f323d8a28748742a7b5bb 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -11,19 +11,19 @@ repository.workspace = true workspace = true [dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } -bp-parachains = { path = "../parachains", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -ed25519-dalek = { version = "2.1", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } 
-sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +bp-header-chain = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +codec = { workspace = true } +ed25519-dalek = { workspace = true } +finality-grandpa = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index f4fe4a242e79c0e1c8a499c4dd18ed4a2164c656..9855c32a468954dd0f3011c029c793e5cd2cbc35 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -177,6 +177,7 @@ pub fn prepare_parachain_heads_proof( let mut parachains = Vec::with_capacity(heads.len()); let mut root = Default::default(); let mut mdb = MemoryDB::default(); + let mut storage_keys = vec![]; { let mut trie = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (parachain, head) in heads { @@ -185,11 +186,12 @@ pub fn prepare_parachain_heads_proof( trie.insert(&storage_key.0, &head.encode()) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in tests"); + storage_keys.push(storage_key.0); parachains.push((ParaId(parachain), head.hash())); } } - // generate storage proof to be delivered to This chain + // generate storage proof to be delivered to this chain let storage_proof = record_all_trie_keys::, _>(&mdb, &root) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index b94e722024562e526c33d2bf1efe9b89f1a035aa..c3cf3356184be676ffae0c212fc20455395d6d09 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -11,12 +11,12 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } # Substrate Dependencies -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-runtime = { workspace = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index 27881bc99d1f838bb5a72c02fe565ef5dc0307fd..932e9ade019741dbc6a99fcea317aaee539ed9c9 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # Substrate Dependencies -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/relays/client-substrate/Cargo.toml 
b/bridges/relays/client-substrate/Cargo.toml index cb7eae4f340c7375ad69b111f6b561c84bc57144..969cd73d6194fcf42e03f54ae14029cfdf73d877 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -11,50 +11,49 @@ publish = false workspace = true [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["macros", "ws-client"] } +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["macros", "ws-client"], workspace = true } log = { workspace = true } -num-traits = "0.2" -rand = "0.8.5" -scale-info = { version = "2.11.1", features = ["derive"] } -tokio = { version = "1.37", features = ["rt-multi-thread"] } +num-traits = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +serde_json = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } +tokio = { features = ["rt-multi-thread"], workspace = true, default-features = true } thiserror = { workspace = true } +quick_cache = { workspace = true } # Bridge dependencies -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -pallet-bridge-messages = { path = "../../modules/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } +bp-header-chain = { workspace = true, default-features = true } +bp-messages = { workspace = true, default-features = true } +bp-polkadot-core = { workspace = true, default-features = true } +bp-runtime = { workspace = true, default-features = true } +finality-relay = { workspace = true } +relay-utils = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } -pallet-utility = { path = "../../../substrate/frame/utility" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-rpc = { path = "../../../substrate/primitives/rpc" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-trie = { path = "../../../substrate/primitives/trie" } -sp-version = { path = "../../../substrate/primitives/version" } +frame-support = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } 
+sc-chain-spec = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm" } +xcm = { workspace = true, default-features = true } [features] default = [] diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 40269fe64c879249e9f0ed5ffe070d9fc606bdb6..227e9c31c5bfc6a93df88bb117f29bdb1b7c61d3 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -36,6 +36,9 @@ use sp_runtime::{ }; use std::{fmt::Debug, time::Duration}; +/// Signed block type of given chain. +pub type SignedBlockOf = ::SignedBlock; + /// Substrate-based chain from minimal relay-client point of view. pub trait Chain: ChainBase + Clone { /// Chain name. diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs deleted file mode 100644 index 2e7cb7455f76cceee1c63aae4efb4a5cfe9f2a69..0000000000000000000000000000000000000000 --- a/bridges/relays/client-substrate/src/client.rs +++ /dev/null @@ -1,1032 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node client. 
- -use crate::{ - chain::{Chain, ChainWithTransactions}, - guard::Environment, - rpc::{ - SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, - SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - }, - transaction_stall_timeout, AccountKeyPairOf, ChainWithGrandpa, ConnectionParams, Error, HashOf, - HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, -}; - -use async_std::sync::{Arc, Mutex, RwLock}; -use async_trait::async_trait; -use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::DeserializeOwned, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Saturating, Zero}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, Pair, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::{cmp::Ordering, future::Future}; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = - "GrandpaApi_generate_key_ownership_proof"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// The difference between best block number and number of its ancestor, that is enough -/// for us to consider that ancestor an "ancient" block with dropped state. -/// -/// The relay does not assume that it is connected to the archive node, so it always tries -/// to use the best available chain state. But sometimes it still may use state of some -/// old block. If the state of that block is already dropped, relay will see errors when -/// e.g. it tries to prove something. -/// -/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use -/// half of this value. -pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; - -/// Returns `true` if we think that the state is already discarded for given block. -pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { - best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) -} - -/// Opaque justifications subscription type. -pub struct Subscription( - pub(crate) Mutex>>, - // The following field is not explicitly used by the code. But when it is dropped, - // the bakground task receives a shutdown signal. - #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, -); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. -#[derive(Copy, Clone, Debug)] -pub struct SimpleRuntimeVersion { - /// Version of the runtime specification. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. - pub transaction_version: u32, -} - -impl SimpleRuntimeVersion { - /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. 
- pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { - Self { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - } - } -} - -/// Chain runtime version in client -#[derive(Copy, Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. - Custom(SimpleRuntimeVersion), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation that only clones internal references. Different -/// clones of the same client are guaranteed to use the same references. -pub struct Client { - // Lock order: `submit_signed_extrinsic_lock`, `data` - /// Client connection params. - params: Arc, - /// Saved chain runtime version. - chain_runtime_version: ChainRuntimeVersion, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. - submit_signed_extrinsic_lock: Arc>, - /// Genesis block hash. - genesis_hash: HashOf, - /// Shared dynamic data. - data: Arc>, -} - -/// Client data, shared by all `Client` clones. -struct ClientData { - /// Tokio runtime handle. - tokio: Arc, - /// Substrate RPC client. - client: Arc, -} - -/// Already encoded value. -struct PreEncoded(Vec); - -impl Encode for PreEncoded { - fn encode(&self) -> Vec { - self.0.clone() - } -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let mut data = self.data.write().await; - let (tokio, client) = Self::build_client(&self.params).await?; - data.tokio = tokio; - data.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - chain_runtime_version: self.chain_runtime_version, - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - genesis_hash: self.genesis_hash, - data: self.data.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - let params = Arc::new(params); - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. 
- pub async fn try_connect(params: Arc) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(number)).await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version; - let mut client = Self { - params, - chain_runtime_version, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - genesis_hash, - data: Arc::new(RwLock::new(ClientData { tokio, client })), - }; - Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; - Ok(client) - } - - // Check runtime version to understand if we need are connected to expected version, or we - // need to wait for upgrade, we need to abort immediately. - async fn ensure_correct_runtime_version>( - env: &mut E, - expected: ChainRuntimeVersion, - ) -> Result<()> { - // we are only interested if version mode is bundled or passed using CLI - let expected = match expected { - ChainRuntimeVersion::Auto => return Ok(()), - ChainRuntimeVersion::Custom(expected) => expected, - }; - - // we need to wait if actual version is < than expected, we are OK of versions are the - // same and we need to abort if actual version is > than expected - let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); - match actual.spec_version.cmp(&expected.spec_version) { - Ordering::Less => - Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), - Ordering::Equal => Ok(()), - Ordering::Greater => { - log::error!( - target: "bridge", - "The {} client is configured to use runtime version {expected:?} and actual \ - version is {actual:?}. Aborting", - C::NAME, - ); - env.abort().await; - Err(Error::Custom("Aborted".into())) - }, - } - } - - /// Build client to use in connection. - async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - - let uri = match params.uri { - Some(ref uri) => uri.clone(), - None => { - format!( - "{}://{}:{}{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - match params.path { - Some(ref path) => format!("/{}", path), - None => String::new(), - }, - ) - }, - }; - log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); - - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. - pub async fn simple_runtime_version(&self) -> Result { - Ok(match &self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - SimpleRuntimeVersion::from_runtime_version(&runtime_version) - }, - ChainRuntimeVersion::Custom(version) => *version, - }) - } - - /// Returns true if client is connected to at least one peer and is in synced state. 
- pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateSystemClient::::health(&*client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::finalized_head(&*client).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestFinalizedHeaderHash { - chain: C::NAME.into(), - error: e.boxed(), - }) - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.best_finalized_header().await?.number()) - } - - /// Return header of the best finalized block. - pub async fn best_finalized_header(&self) -> Result { - self.header_by_hash(self.best_finalized_header_hash().await?).await - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::header(&*client, None).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }) - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block(&*client, block_hash).await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::header(&*client, Some(block_hash)).await?) - }) - .await - .map_err(|e| Error::FailedToReadHeaderByHash { - chain: C::NAME.into(), - hash: format!("{block_hash}"), - error: e.boxed(), - }) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) - }) - .await - } - - /// Get a Substrate header by its number. - pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::runtime_version(&*client).await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `MapStorage` value from runtime storage. 
- pub async fn storage_map_value( - &self, - pallet_prefix: &str, - key: &T::Key, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `DoubleMapStorage` value from runtime storage. - pub async fn storage_double_map_value( - &self, - pallet_prefix: &str, - key1: &T::Key1, - key2: &T::Key2, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key1, key2); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. - pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - let cloned_storage_key = storage_key.clone(); - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::storage(&*client, storage_key.clone(), block_hash) - .await?) - }) - .await - .map_err(|e| Error::FailedToReadRuntimeStorageValue { - chain: C::NAME.into(), - key: cloned_storage_key, - error: e.boxed(), - }) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - let best_header_hash = self.best_header().await?.hash(); - self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> - where - C: ChainWithTransactions, - { - let runtime_version = self.simple_runtime_version().await?; - Ok(SignParam:: { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - genesis_hash: self.genesis_hash, - signer, - }) - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
- pub async fn submit_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let signing_data = self.build_sign_params(signer.clone()).await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it - // has been changed to `B[num=100]`. Hash of `A` has been included into transaction - // signature payload. So when signature will be checked, the check will fail and transaction - // will be dropped from the pool. - let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = - SubstrateAuthorClient::::submit_extrinsic(&*client, Bytes(signed_extrinsic)) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result> - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let self_clone = self.clone(); - let signing_data = self.build_sign_params(signer.clone()).await?; - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let best_header_id = best_header.id(); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let stall_timeout = transaction_stall_timeout( - extrinsic.era.mortality_period(), - C::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - let (tracker, subscription) = self - .jsonrpsee_execute(move |client| async move { - let tx_hash = C::Hasher::hash(&signed_extrinsic); - let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( - &*client, - Bytes(signed_extrinsic), - ) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - let tracker = TransactionTracker::new( - self_clone, - stall_timeout, - tx_hash, - Subscription(Mutex::new(receiver), cancel_sender), - ); - Ok((tracker, subscription)) - }) - .await?; - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(tracker) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) - }) - .await - } - - /// Validate transaction at given block state. - pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(at_block)).await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Returns weight of the given transaction. - pub async fn extimate_extrinsic_weight( - &self, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction_len = transaction.encoded_size() as u32; - - let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); - let data = Bytes((transaction, transaction_len).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, None).await?; - let dispatch_info = - RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(dispatch_info.weight) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block, provided the input and output types. - /// It also performs the input encode and output decode. 
- pub async fn typed_state_call( - &self, - method_name: String, - input: Input, - at_block: Option, - ) -> Result { - let encoded_output = self - .state_call(method_name.clone(), Bytes(input.encode()), at_block) - .await - .map_err(|e| Error::ErrorExecutingRuntimeCall { - chain: C::NAME.into(), - method: method_name, - error: e.boxed(), - })?; - Output::decode(&mut &encoded_output.0[..]).map_err(Error::ResponseParseFailed) - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. - pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateSystemClient::::properties(&*client).await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new finality justifications stream. - pub async fn subscribe_finality_justifications>( - &self, - ) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(FC::subscribe_justifications(&client).await?) - }) - .await?; - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(Subscription(Mutex::new(receiver), cancel_sender)) - } - - /// Generates a proof of key ownership for the given authority in the given set. - pub async fn generate_grandpa_key_ownership_proof( - &self, - at: HashOf, - set_id: sp_consensus_grandpa::SetId, - authority_id: sp_consensus_grandpa::AuthorityId, - ) -> Result> - where - C: ChainWithGrandpa, - { - self.typed_state_call( - SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), - (set_id, authority_id), - Some(at), - ) - .await - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send + 'static, - T: Send + 'static, - { - let data = self.data.read().await; - let client = data.client.clone(); - data.tokio.spawn(make_jsonrpsee_future(client)).await? - } - - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - pub fn can_start_version_guard(&self) -> bool { - !matches!(self.chain_runtime_version, ChainRuntimeVersion::Auto) - } -} - -impl Subscription { - /// Consumes subscription and returns future statuses stream. 
- pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(Some(self), |mut this| async move { - let Some(this) = this.take() else { return None }; - let item = this.0.lock().await.next().await.unwrap_or(None); - match item { - Some(item) => Some((item, Some(this))), - None => { - // let's make it explicit here - let _ = this.1.send(()); - None - }, - } - }) - } - - /// Return next item from the subscription. - pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - cancel_receiver: futures::channel::oneshot::Receiver<()>, - ) { - log::trace!( - target: "bridge", - "Starting background worker for {} {} subscription stream.", - chain_name, - item_type, - ); - - futures::pin_mut!(subscription, cancel_receiver); - loop { - match futures::future::select(subscription.next(), &mut cancel_receiver).await { - futures::future::Either::Left((Some(Ok(item)), _)) => - if sender.send(Some(item)).await.is_err() { - log::trace!( - target: "bridge", - "{} {} subscription stream: no listener. Stopping background worker.", - chain_name, - item_type, - ); - - break - }, - futures::future::Either::Left((Some(Err(e)), _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Left((None, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Right((_, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream: listener has been dropped. 
Stopping background worker.", - chain_name, - item_type, - ); - break; - }, - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; - use futures::{channel::mpsc::unbounded, FutureExt}; - - async fn run_ensure_correct_runtime_version( - expected: ChainRuntimeVersion, - actual: RuntimeVersion, - ) -> Result<()> { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, _slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - runtime_version_tx.send(actual).await.unwrap(); - let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; - - let ensure_correct_runtime_version = - Client::::ensure_correct_runtime_version(&mut env, expected).boxed(); - let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); - futures::pin_mut!(ensure_correct_runtime_version, aborted); - futures::future::select(ensure_correct_runtime_version, aborted) - .await - .into_inner() - .0 - } - - #[async_std::test] - async fn ensure_correct_runtime_version_works() { - // when we are configured to use auto version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Auto, - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual == expected - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual spec version < expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, - ) - .await, - Err(Error::WaitingForRuntimeUpgrade { - expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, - actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, - .. - }), - )); - // when actual spec version > expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 101, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Err(Error::Custom(_)), - )); - } -} diff --git a/bridges/relays/client-substrate/src/client/caching.rs b/bridges/relays/client-substrate/src/client/caching.rs new file mode 100644 index 0000000000000000000000000000000000000000..a574e5985bc8280e030cfad08308a2c9bebe33a4 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/caching.rs @@ -0,0 +1,472 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that is caching (whenever possible) results of its backend +//! method calls. + +use crate::{ + client::{Client, SubscriptionBroadcaster}, + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, ANCIENT_BLOCK_THRESHOLD, +}; +use std::{cmp::Ordering, future::Future, task::Poll}; + +use async_std::{ + sync::{Arc, Mutex, RwLock}, + task::JoinHandle, +}; +use async_trait::async_trait; +use codec::Encode; +use frame_support::weights::Weight; +use futures::{FutureExt, StreamExt}; +use quick_cache::unsync::Cache; +use sp_consensus_grandpa::{AuthorityId, OpaqueKeyOwnershipProof, SetId}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; + +/// `quick_cache::unsync::Cache` wrapped in async-aware synchronization primitives. +type SyncCache = Arc>>; + +/// Client implementation that is caching (whenever possible) results of its backend +/// method calls. Apart from caching call results, it also supports some (at the +/// moment: justifications) subscription sharing, meaning that the single server +/// subscription may be shared by multiple subscribers at the client side. +#[derive(Clone)] +pub struct CachingClient> { + backend: B, + data: Arc>, +} + +/// Client data, shared by all `CachingClient` clones. +struct ClientData { + grandpa_justifications: Arc>>>, + beefy_justifications: Arc>>>, + background_task_handle: Arc>>>, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + // `quick_cache::sync::Cache` has the `get_or_insert_async` method, which fits our needs, + // but it uses synchronization primitives that are not aware of async execution. They + // can block the executor threads and cause deadlocks => let's use primitives from + // `async_std` crate around `quick_cache::unsync::Cache` + header_hash_by_number_cache: SyncCache, HashOf>, + header_by_hash_cache: SyncCache, HeaderOf>, + block_by_hash_cache: SyncCache, SignedBlockOf>, + raw_storage_value_cache: SyncCache<(HashOf, StorageKey), Option>, + state_call_cache: SyncCache<(HashOf, String, Bytes), Bytes>, +} + +impl> CachingClient { + /// Creates new `CachingClient` on top of given `backend`. 
+ pub async fn new(backend: B) -> Self { + // most of relayer operations will never touch more than `ANCIENT_BLOCK_THRESHOLD` + // headers, so we'll use this as a cache capacity for all chain-related caches + let chain_state_capacity = ANCIENT_BLOCK_THRESHOLD as usize; + let best_header = Arc::new(RwLock::new(None)); + let best_finalized_header = Arc::new(RwLock::new(None)); + let header_by_hash_cache = Arc::new(RwLock::new(Cache::new(chain_state_capacity))); + let background_task_handle = Self::start_background_task( + backend.clone(), + best_header.clone(), + best_finalized_header.clone(), + header_by_hash_cache.clone(), + ) + .await; + CachingClient { + backend, + data: Arc::new(ClientData { + grandpa_justifications: Arc::new(Mutex::new(None)), + beefy_justifications: Arc::new(Mutex::new(None)), + background_task_handle: Arc::new(Mutex::new(background_task_handle)), + best_header, + best_finalized_header, + header_hash_by_number_cache: Arc::new(RwLock::new(Cache::new( + chain_state_capacity, + ))), + header_by_hash_cache, + block_by_hash_cache: Arc::new(RwLock::new(Cache::new(chain_state_capacity))), + raw_storage_value_cache: Arc::new(RwLock::new(Cache::new(1_024))), + state_call_cache: Arc::new(RwLock::new(Cache::new(1_024))), + }), + } + } + + /// Try to get value from the cache, or compute and insert it using given future. + async fn get_or_insert_async( + &self, + cache: &Arc>>, + key: &K, + with: impl std::future::Future>, + ) -> Result { + // try to get cached value first using read lock + { + let cache = cache.read().await; + if let Some(value) = cache.get(key) { + return Ok(value.clone()) + } + } + + // let's compute the value without holding any locks - it may cause additional misses and + // double insertions, but that's better than holding a lock for a while + let value = with.await?; + + // insert/update the value in the cache + cache.write().await.insert(key.clone(), value.clone()); + Ok(value) + } + + /// Subscribe to finality justifications, trying to reuse existing subscription. + async fn subscribe_finality_justifications<'a>( + &'a self, + maybe_broadcaster: &Mutex>>, + do_subscribe: impl Future>> + 'a, + ) -> Result> { + let mut maybe_broadcaster = maybe_broadcaster.lock().await; + let broadcaster = match maybe_broadcaster.as_ref() { + Some(justifications) => justifications, + None => { + let broadcaster = match SubscriptionBroadcaster::new(do_subscribe.await?) { + Ok(broadcaster) => broadcaster, + Err(subscription) => return Ok(subscription), + }; + maybe_broadcaster.get_or_insert(broadcaster) + }, + }; + + broadcaster.subscribe().await + } + + /// Start background task that reads best (and best finalized) headers from subscriptions. + async fn start_background_task( + backend: B, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + header_by_hash_cache: SyncCache, HeaderOf>, + ) -> JoinHandle> { + async_std::task::spawn(async move { + // initialize by reading headers directly from backend to avoid doing that in the + // high-level code + let mut last_finalized_header = + backend.header_by_hash(backend.best_finalized_header_hash().await?).await?; + *best_header.write().await = Some(backend.best_header().await?); + *best_finalized_header.write().await = Some(last_finalized_header.clone()); + + // ...and then continue with subscriptions + let mut best_headers = backend.subscribe_best_headers().await?; + let mut finalized_headers = backend.subscribe_finalized_headers().await?; + loop { + futures::select! 
{ + new_best_header = best_headers.next().fuse() => { + // we assume that the best header is always the actual best header, even if its + // number is lower than the number of previous-best-header (chain may use its own + // best header selection algorithms) + let new_best_header = new_best_header + .ok_or_else(|| Error::ChannelError(format!("Mandatory best headers subscription for {} has finished", C::NAME)))?; + let new_best_header_hash = new_best_header.hash(); + header_by_hash_cache.write().await.insert(new_best_header_hash, new_best_header.clone()); + *best_header.write().await = Some(new_best_header); + }, + new_finalized_header = finalized_headers.next().fuse() => { + // in theory we'll always get finalized headers in order, but let's double check + let new_finalized_header = new_finalized_header. + ok_or_else(|| Error::ChannelError(format!("Finalized headers subscription for {} has finished", C::NAME)))?; + let new_finalized_header_number = *new_finalized_header.number(); + let last_finalized_header_number = *last_finalized_header.number(); + match new_finalized_header_number.cmp(&last_finalized_header_number) { + Ordering::Greater => { + let new_finalized_header_hash = new_finalized_header.hash(); + header_by_hash_cache.write().await.insert(new_finalized_header_hash, new_finalized_header.clone()); + *best_finalized_header.write().await = Some(new_finalized_header.clone()); + last_finalized_header = new_finalized_header; + }, + Ordering::Less => { + return Err(Error::unordered_finalized_headers::( + new_finalized_header_number, + last_finalized_header_number, + )); + }, + _ => (), + } + }, + } + } + }) + } + + /// Ensure that the background task is active. + async fn ensure_background_task_active(&self) -> Result<()> { + let mut background_task_handle = self.data.background_task_handle.lock().await; + if let Poll::Ready(result) = futures::poll!(&mut *background_task_handle) { + return Err(Error::ChannelError(format!( + "Background task of {} client has exited with result: {:?}", + C::NAME, + result + ))) + } + + Ok(()) + } + + /// Try to get header, read elsewhere by background task through subscription. 
+ async fn read_header_from_background<'a>( + &'a self, + header: &Arc>>>, + read_header_from_backend: impl Future>> + 'a, + ) -> Result> { + // ensure that the background task is active + self.ensure_background_task_active().await?; + + // now we know that the background task is active, so we could trust that the + // `header` has the most recent updates from it + match header.read().await.clone() { + Some(header) => Ok(header), + None => { + // header has not yet been read from the subscription, which means that + // we are just starting - let's read header directly from backend this time + read_header_from_backend.await + }, + } + } +} + +impl> std::fmt::Debug for CachingClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("CachingClient<{:?}>", self.backend)) + } +} + +#[async_trait] +impl> Client for CachingClient { + async fn ensure_synced(&self) -> Result<()> { + self.backend.ensure_synced().await + } + + async fn reconnect(&self) -> Result<()> { + self.backend.reconnect().await?; + // since we have new underlying client, we need to restart subscriptions too + *self.data.grandpa_justifications.lock().await = None; + *self.data.beefy_justifications.lock().await = None; + // also restart background task too + *self.data.best_header.write().await = None; + *self.data.best_finalized_header.write().await = None; + *self.data.background_task_handle.lock().await = Self::start_background_task( + self.backend.clone(), + self.data.best_header.clone(), + self.data.best_finalized_header.clone(), + self.data.header_by_hash_cache.clone(), + ) + .await; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.backend.genesis_hash() + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.get_or_insert_async( + &self.data.header_hash_by_number_cache, + &number, + self.backend.header_hash_by_number(number), + ) + .await + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.header_by_hash_cache, + &hash, + self.backend.header_by_hash(hash), + ) + .await + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.block_by_hash_cache, + &hash, + self.backend.block_by_hash(hash), + ) + .await + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.read_header_from_background( + &self.data.best_finalized_header, + self.backend.best_finalized_header(), + ) + .await + .map(|h| h.hash()) + } + + async fn best_header(&self) -> Result> { + self.read_header_from_background(&self.data.best_header, self.backend.best_header()) + .await + } + + async fn subscribe_best_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_best_headers().await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_finalized_headers().await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications( + &self.data.grandpa_justifications, + self.backend.subscribe_grandpa_finality_justifications(), + ) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: SetId, + authority_id: AuthorityId, + ) -> Result> { + self.backend + .generate_grandpa_key_ownership_proof(at, set_id, authority_id) + .await + } + + 
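The forwarded calls above rely on `get_or_insert_async`, which takes a shared read lock only for the lookup, holds no lock at all while a missing value is being fetched from the backend, and then briefly takes a write lock to store the result. A minimal, self-contained sketch of that read-through pattern (illustrative only: a plain `HashMap` stands in for the crate's `quick_cache::unsync::Cache`, and the helper name is hypothetical):

```rust
use async_std::sync::RwLock;
use std::{collections::HashMap, hash::Hash, sync::Arc};

/// Read-through lookup that never holds a lock across the slow `compute` future.
async fn get_or_compute<K, V, E>(
	cache: &Arc<RwLock<HashMap<K, V>>>,
	key: &K,
	compute: impl std::future::Future<Output = Result<V, E>>,
) -> Result<V, E>
where
	K: Clone + Eq + Hash,
	V: Clone,
{
	// fast path: shared read lock, released as soon as the lookup finishes
	{
		let cache = cache.read().await;
		if let Some(value) = cache.get(key) {
			return Ok(value.clone())
		}
	}

	// slow path: compute the value without holding any lock; concurrent callers
	// may duplicate this work, which is accepted in exchange for not blocking
	let value = compute.await?;

	// store the fresh value; a racing writer simply overwrites an equal value
	cache.write().await.insert(key.clone(), value.clone());
	Ok(value)
}
```

Accepting an occasional duplicate fetch under contention is the price for never parking other cache users behind an RPC round trip, which is the trade-off the comments in `get_or_insert_async` describe.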
async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications( + &self.data.beefy_justifications, + self.backend.subscribe_beefy_finality_justifications(), + ) + .await + } + + async fn token_decimals(&self) -> Result> { + self.backend.token_decimals().await + } + + async fn runtime_version(&self) -> Result { + self.backend.runtime_version().await + } + + async fn simple_runtime_version(&self) -> Result { + self.backend.simple_runtime_version().await + } + + fn can_start_version_guard(&self) -> bool { + self.backend.can_start_version_guard() + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.get_or_insert_async( + &self.data.raw_storage_value_cache, + &(at, storage_key.clone()), + self.backend.raw_storage_value(at, storage_key), + ) + .await + } + + async fn pending_extrinsics(&self) -> Result> { + self.backend.pending_extrinsics().await + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + self.backend.submit_unsigned_extrinsic(transaction).await + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend.submit_signed_extrinsic(signer, prepare_extrinsic).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend + .submit_and_watch_signed_extrinsic(signer, prepare_extrinsic) + .await + .map(|t| t.switch_environment(self.clone())) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.validate_transaction(at, transaction).await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.estimate_extrinsic_weight(at, transaction).await + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = Bytes(arguments.encode()); + self.get_or_insert_async( + &self.data.state_call_cache, + &(at, method.clone(), encoded_arguments), + self.backend.raw_state_call(at, method, arguments), + ) + .await + } + + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)> { + self.backend.prove_storage(at, keys).await + } +} diff --git a/bridges/relays/client-substrate/src/client/mod.rs b/bridges/relays/client-substrate/src/client/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..62a1119d718ff79fa45800166c01762e2044f781 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/mod.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Layered Substrate client implementation. + +use crate::{Chain, ConnectionParams}; + +use caching::CachingClient; +use num_traits::Saturating; +use rpc::RpcClient; +use sp_version::RuntimeVersion; + +pub mod caching; +pub mod rpc; + +mod rpc_api; +mod subscription; +mod traits; + +pub use subscription::{StreamDescription, Subscription, SubscriptionBroadcaster}; +pub use traits::Client; + +/// Type of RPC client with caching support. +pub type RpcWithCachingClient = CachingClient>; + +/// Creates new RPC client with caching support. +pub async fn rpc_with_caching(params: ConnectionParams) -> RpcWithCachingClient { + let rpc = rpc::RpcClient::::new(params).await; + caching::CachingClient::new(rpc).await +} + +/// The difference between best block number and number of its ancestor, that is enough +/// for us to consider that ancestor an "ancient" block with dropped state. +/// +/// The relay does not assume that it is connected to the archive node, so it always tries +/// to use the best available chain state. But sometimes it still may use state of some +/// old block. If the state of that block is already dropped, relay will see errors when +/// e.g. it tries to prove something. +/// +/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use +/// half of this value. +pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; + +/// Returns `true` if we think that the state is already discarded for given block. +pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { + best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) +} + +/// Opaque GRANDPA authorities set. +pub type OpaqueGrandpaAuthoritiesSet = Vec; + +/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. +#[derive(Copy, Clone, Debug)] +pub struct SimpleRuntimeVersion { + /// Version of the runtime specification. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. + pub transaction_version: u32, +} + +impl SimpleRuntimeVersion { + /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. + pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { + Self { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + } + } +} + +/// Chain runtime version in client +#[derive(Copy, Clone, Debug)] +pub enum ChainRuntimeVersion { + /// Auto query from chain. + Auto, + /// Custom runtime version, defined by user. + Custom(SimpleRuntimeVersion), +} diff --git a/bridges/relays/client-substrate/src/client/rpc.rs b/bridges/relays/client-substrate/src/client/rpc.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c7f769462e5693bc944ed6a6525439f00311ee7 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/rpc.rs @@ -0,0 +1,755 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that connects to the Substrate node over `ws`/`wss` connection +//! and is using RPC methods to get required data and submit transactions. + +use crate::{ + client::{ + rpc_api::{ + SubstrateAuthorClient, SubstrateBeefyClient, SubstrateChainClient, + SubstrateFrameSystemClient, SubstrateGrandpaClient, SubstrateStateClient, + SubstrateSystemClient, + }, + subscription::{StreamDescription, Subscription}, + Client, + }, + error::{Error, Result}, + guard::Environment, + transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BalanceOf, BlockNumberOf, Chain, + ChainRuntimeVersion, ChainWithGrandpa, ChainWithTransactions, ConnectionParams, HashOf, + HeaderIdOf, HeaderOf, NonceOf, SignParam, SignedBlockOf, SimpleRuntimeVersion, + TransactionTracker, UnsignedTransaction, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; +use async_trait::async_trait; +use bp_runtime::HeaderIdProvider; +use codec::Encode; +use frame_support::weights::Weight; +use futures::TryFutureExt; +use jsonrpsee::{ + core::{client::Subscription as RpcSubscription, ClientError}, + ws_client::{WsClient, WsClientBuilder}, +}; +use num_traits::Zero; +use pallet_transaction_payment::RuntimeDispatchInfo; +use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Hasher, Pair, +}; +use sp_runtime::{ + traits::Header, + transaction_validity::{TransactionSource, TransactionValidity}, +}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::{cmp::Ordering, future::Future, marker::PhantomData}; + +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; + +const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; +const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; +const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = + "GrandpaApi_generate_key_ownership_proof"; + +/// Client implementation that connects to the Substrate node over `ws`/`wss` connection +/// and is using RPC methods to get required data and submit transactions. +pub struct RpcClient { + // Lock order: `submit_signed_extrinsic_lock`, `data` + /// Client connection params. + params: Arc, + /// If several tasks are submitting their transactions simultaneously using + /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of + /// transactions will be rejected from the pool. This lock is here to prevent situations like + /// that. + submit_signed_extrinsic_lock: Arc>, + /// Genesis block hash. + genesis_hash: HashOf, + /// Shared dynamic data. + data: Arc>, + /// Generic arguments dump. + _phantom: PhantomData, +} + +/// Client data, shared by all `RpcClient` clones. +struct ClientData { + /// Tokio runtime handle. + tokio: Arc, + /// Substrate RPC client. + client: Arc, +} + +/// Already encoded value. 
+struct PreEncoded(Vec<u8>);
+
+impl Encode for PreEncoded {
+	fn encode(&self) -> Vec<u8> {
+		self.0.clone()
+	}
+}
+
+impl<C: Chain> std::fmt::Debug for RpcClient<C> {
+	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
+		fmt.write_fmt(format_args!("RpcClient<{}>", C::NAME))
+	}
+}
+
+impl<C: Chain> RpcClient<C> {
+	/// Returns a client that is able to call RPCs on Substrate node over websocket connection.
+	///
+	/// This function will keep connecting to the given Substrate node until the connection is
+	/// established and functional. If an attempt fails, it will wait for `RECONNECT_DELAY` and
+	/// retry.
+	pub async fn new(params: ConnectionParams) -> Self {
+		let params = Arc::new(params);
+		loop {
+			match Self::try_connect(params.clone()).await {
+				Ok(client) => return client,
+				Err(error) => log::error!(
+					target: "bridge",
+					"Failed to connect to {} node: {:?}. Going to retry in {}s",
+					C::NAME,
+					error,
+					RECONNECT_DELAY.as_secs(),
+				),
+			}
+
+			async_std::task::sleep(RECONNECT_DELAY).await;
+		}
+	}
+
+	/// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection
+	/// has been established or error otherwise.
+	async fn try_connect(params: Arc<ConnectionParams>) -> Result<Self> {
+		let (tokio, client) = Self::build_client(&params).await?;
+
+		let genesis_hash_client = client.clone();
+		let genesis_hash = tokio
+			.spawn(async move {
+				SubstrateChainClient::<C>::block_hash(&*genesis_hash_client, Some(Zero::zero()))
+					.await
+			})
+			.await??;
+
+		let chain_runtime_version = params.chain_runtime_version;
+		let mut client = Self {
+			params,
+			submit_signed_extrinsic_lock: Arc::new(Mutex::new(())),
+			genesis_hash,
+			data: Arc::new(RwLock::new(ClientData { tokio, client })),
+			_phantom: PhantomData,
+		};
+		Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?;
+		Ok(client)
+	}
+
+	// Check the runtime version to understand whether we are connected to the expected version,
+	// whether we need to wait for a runtime upgrade, or whether we need to abort immediately.
+	async fn ensure_correct_runtime_version<E: Environment<C>>(
+		env: &mut E,
+		expected: ChainRuntimeVersion,
+	) -> Result<()> {
+		// we are only interested if version mode is bundled or passed using CLI
+		let expected = match expected {
+			ChainRuntimeVersion::Auto => return Ok(()),
+			ChainRuntimeVersion::Custom(expected) => expected,
+		};
+
+		// we need to wait if the actual version is lower than the expected one, we are OK if the
+		// versions are the same, and we need to abort if the actual version is higher than
+		// expected
+		let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?);
+		match actual.spec_version.cmp(&expected.spec_version) {
+			Ordering::Less =>
+				Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }),
+			Ordering::Equal => Ok(()),
+			Ordering::Greater => {
+				log::error!(
+					target: "bridge",
+					"The {} client is configured to use runtime version {expected:?} and actual \
+					version is {actual:?}. Aborting",
+					C::NAME,
+				);
+				env.abort().await;
+				Err(Error::Custom("Aborted".into()))
+			},
+		}
+	}
+
+	/// Build client to use in connection.
+ async fn build_client( + params: &ConnectionParams, + ) -> Result<(Arc, Arc)> { + let tokio = tokio::runtime::Runtime::new()?; + let uri = match params.uri { + Some(ref uri) => uri.clone(), + None => { + format!( + "{}://{}:{}{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + match params.path { + Some(ref path) => format!("/{}", path), + None => String::new(), + }, + ) + }, + }; + log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); + + let client = tokio + .spawn(async move { + WsClientBuilder::default() + .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await + }) + .await??; + + Ok((Arc::new(tokio), Arc::new(client))) + } + + /// Execute jsonrpsee future in tokio context. + async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result + where + MF: FnOnce(Arc) -> F + Send + 'static, + F: Future> + Send + 'static, + T: Send + 'static, + { + let data = self.data.read().await; + let client = data.client.clone(); + data.tokio.spawn(make_jsonrpsee_future(client)).await? + } + + /// Prepare parameters used to sign chain transactions. + async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> + where + C: ChainWithTransactions, + { + let runtime_version = self.simple_runtime_version().await?; + Ok(SignParam:: { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + genesis_hash: self.genesis_hash, + signer, + }) + } + + /// Get the nonce of the given Substrate account. + pub async fn next_account_index(&self, account: AccountIdOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) + }) + .await + } + + /// Subscribe to finality justifications. + async fn subscribe_finality_justifications( + &self, + gadget_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + ) -> Result> + where + Fut: Future, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(|e| Error::failed_to_subscribe_justification::(e)) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} justifications", gadget_name), C::NAME.into()), + subscription, + )) + } + + /// Subscribe to headers stream. + async fn subscribe_headers( + &self, + stream_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + map_err: impl FnOnce(Error) -> Error, + ) -> Result>> + where + Fut: Future>, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(map_err) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} headers", stream_name), C::NAME.into()), + subscription, + )) + } +} + +impl Clone for RpcClient { + fn clone(&self) -> Self { + RpcClient { + params: self.params.clone(), + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), + genesis_hash: self.genesis_hash, + data: self.data.clone(), + _phantom: PhantomData, + } + } +} + +#[async_trait] +impl Client for RpcClient { + async fn ensure_synced(&self) -> Result<()> { + let health = self + .jsonrpsee_execute(|client| async move { + Ok(SubstrateSystemClient::::health(&*client).await?) 
+ }) + .await + .map_err(|e| Error::failed_to_get_system_health::(e))?; + + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + } + + async fn reconnect(&self) -> Result<()> { + let mut data = self.data.write().await; + let (tokio, client) = Self::build_client(&self.params).await?; + data.tokio = tokio; + data.client = client; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.genesis_hash + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_hash_by_number::(number, e)) + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::header(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_by_hash::(hash, e)) + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_block_by_hash::(hash, e)) + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::finalized_head(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_finalized_header_hash::(e)) + } + + async fn best_header(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::header(&*client, None).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_header::(e)) + } + + async fn subscribe_best_headers(&self) -> Result>> { + self.subscribe_headers( + "best headers", + move |client| async move { SubstrateChainClient::::subscribe_new_heads(&*client).await }, + |e| Error::failed_to_subscribe_best_headers::(e), + ) + .await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + self.subscribe_headers( + "best finalized headers", + move |client| async move { + SubstrateChainClient::::subscribe_finalized_heads(&*client).await + }, + |e| Error::failed_to_subscribe_finalized_headers::(e), + ) + .await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications("GRANDPA", move |client| async move { + SubstrateGrandpaClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result> { + self.state_call( + at, + SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), + (set_id, authority_id), + ) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications("BEEFY", move |client| async move { + SubstrateBeefyClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn token_decimals(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let system_properties = SubstrateSystemClient::::properties(&*client).await?; + Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) + }) + .await + } + + async fn runtime_version(&self) -> Result { + self.jsonrpsee_execute(move |client| async move { + 
			Ok(SubstrateStateClient::<C>::runtime_version(&*client).await?)
+		})
+		.await
+		.map_err(|e| Error::failed_to_read_runtime_version::<C>(e))
+	}
+
+	async fn simple_runtime_version(&self) -> Result<SimpleRuntimeVersion> {
+		Ok(match self.params.chain_runtime_version {
+			ChainRuntimeVersion::Auto => {
+				let runtime_version = self.runtime_version().await?;
+				SimpleRuntimeVersion::from_runtime_version(&runtime_version)
+			},
+			ChainRuntimeVersion::Custom(ref version) => *version,
+		})
+	}
+
+	fn can_start_version_guard(&self) -> bool {
+		!matches!(self.params.chain_runtime_version, ChainRuntimeVersion::Auto)
+	}
+
+	async fn raw_storage_value(
+		&self,
+		at: HashOf<C>,
+		storage_key: StorageKey,
+	) -> Result<Option<StorageData>> {
+		let cloned_storage_key = storage_key.clone();
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(SubstrateStateClient::<C>::storage(&*client, cloned_storage_key, Some(at)).await?)
+		})
+		.await
+		.map_err(|e| Error::failed_to_read_storage_value::<C>(at, storage_key, e))
+	}
+
+	async fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(SubstrateAuthorClient::<C>::pending_extrinsics(&*client).await?)
+		})
+		.await
+		.map_err(|e| Error::failed_to_get_pending_extrinsics::<C>(e))
+	}
+
+	async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result<HashOf<C>> {
+		// one last check that the transaction is valid. Most of the checks happen in the relay
+		// loop and it is the "final" check before submission.
+		let best_header_hash = self.best_header_hash().await?;
+		self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone()))
+			.await
+			.map_err(|e| Error::failed_to_submit_transaction::<C>(e))?
+			.map_err(|e| Error::failed_to_submit_transaction::<C>(Error::TransactionInvalid(e)))?;
+
+		self.jsonrpsee_execute(move |client| async move {
+			let tx_hash = SubstrateAuthorClient::<C>::submit_extrinsic(&*client, transaction)
+				.await
+				.map_err(|e| {
+					log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e);
+					e
+				})?;
+			log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash);
+			Ok(tx_hash)
+		})
+		.await
+		.map_err(|e| Error::failed_to_submit_transaction::<C>(e))
+	}
+
+	async fn submit_signed_extrinsic(
+		&self,
+		signer: &AccountKeyPairOf<C>,
+		prepare_extrinsic: impl FnOnce(HeaderIdOf<C>, NonceOf<C>) -> Result<UnsignedTransaction<C>>
+			+ Send
+			+ 'static,
+	) -> Result<HashOf<C>>
+	where
+		C: ChainWithTransactions,
+		AccountIdOf<C>: From<<AccountKeyPairOf<C> as Pair>::Public>,
+	{
+		let _guard = self.submit_signed_extrinsic_lock.lock().await;
+		let transaction_nonce = self.next_account_index(signer.public().into()).await?;
+		let best_header = self.best_header().await?;
+		let signing_data = self.build_sign_params(signer.clone()).await?;
+
+		// By using the parent of the best block here, we are protecting against best-block
+		// reorganizations. E.g. a transaction may have been submitted when the best block was
+		// `A[num=100]`, which has later been replaced by `B[num=100]`. The hash of `A` has been
+		// included into the transaction signature payload, so the signature check would fail and
+		// the transaction would be dropped from the pool. A fork that replaces only the best
+		// block still builds on the parent of `A`, so a payload built from the parent stays valid.
+ let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + self.submit_unsigned_extrinsic(Bytes(signed_extrinsic)).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let self_clone = self.clone(); + let signing_data = self.build_sign_params(signer.clone()).await?; + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let best_header_id = best_header.id(); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let stall_timeout = transaction_stall_timeout( + extrinsic.era.mortality_period(), + C::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + + // one last check that the transaction is valid. Most of checks happen in the relay loop and + // it is the "final" check before submission. + self.validate_transaction(best_header_id.hash(), PreEncoded(signed_extrinsic.clone())) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e))? + .map_err(|e| Error::failed_to_submit_transaction::(Error::TransactionInvalid(e)))?; + + self.jsonrpsee_execute(move |client| async move { + let tx_hash = C::Hasher::hash(&signed_extrinsic); + let subscription: jsonrpsee::core::client::Subscription<_> = + SubstrateAuthorClient::::submit_and_watch_extrinsic( + &*client, + Bytes(signed_extrinsic), + ) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(TransactionTracker::new( + self_clone, + stall_timeout, + tx_hash, + Subscription::new_forwarded( + StreamDescription::new("transaction events".into(), C::NAME.into()), + subscription, + ), + )) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.state_call( + at, + SUB_API_TXPOOL_VALIDATE_TRANSACTION.into(), + (TransactionSource::External, transaction, at), + ) + .await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + let transaction_len = transaction.encoded_size() as u32; + let dispatch_info: RuntimeDispatchInfo> = self + .state_call(at, SUB_API_TX_PAYMENT_QUERY_INFO.into(), (transaction, transaction_len)) + .await?; + + Ok(dispatch_info.weight) + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let arguments = Bytes(arguments.encode()); + let arguments_clone = arguments.clone(); + let method_clone = method.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::call(&*client, method, arguments, Some(at)) + .await + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_state_call::(at, method_clone, arguments_clone, e)) + } + + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)> { + let state_root = 
*self.header_by_hash(at).await?.state_root(); + + let keys_clone = keys.clone(); + let read_proof = self + .jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::prove_storage(&*client, keys_clone, Some(at)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0))) + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_to_prove_storage::(at, keys.clone(), e))?; + + Ok((read_proof, state_root)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; + use futures::{channel::mpsc::unbounded, FutureExt, SinkExt, StreamExt}; + + async fn run_ensure_correct_runtime_version( + expected: ChainRuntimeVersion, + actual: RuntimeVersion, + ) -> Result<()> { + let ( + (mut runtime_version_tx, runtime_version_rx), + (slept_tx, _slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded()); + runtime_version_tx.send(actual).await.unwrap(); + let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; + + let ensure_correct_runtime_version = + RpcClient::::ensure_correct_runtime_version(&mut env, expected).boxed(); + let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); + futures::pin_mut!(ensure_correct_runtime_version, aborted); + futures::future::select(ensure_correct_runtime_version, aborted) + .await + .into_inner() + .0 + } + + #[async_std::test] + async fn ensure_correct_runtime_version_works() { + // when we are configured to use auto version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Auto, + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual == expected + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual spec version < expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, + ) + .await, + Err(Error::WaitingForRuntimeUpgrade { + expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, + actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, + .. + }), + )); + // when actual spec version > expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 101, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Err(Error::Custom(_)), + )); + } +} diff --git a/bridges/relays/client-substrate/src/rpc.rs b/bridges/relays/client-substrate/src/client/rpc_api.rs similarity index 80% rename from bridges/relays/client-substrate/src/rpc.rs rename to bridges/relays/client-substrate/src/client/rpc_api.rs index 60c29cdeb5c7707619b65d23800dcd7dfdfd840a..9cac69f7a13d06bf772fb9493520113d8e066681 100644 --- a/bridges/relays/client-substrate/src/rpc.rs +++ b/bridges/relays/client-substrate/src/client/rpc_api.rs @@ -16,15 +16,9 @@ //! The most generic Substrate node RPC interface. 
-use async_trait::async_trait; - use crate::{Chain, ChainWithGrandpa, TransactionStatusOf}; -use jsonrpsee::{ - core::{client::Subscription, ClientError}, - proc_macros::rpc, - ws_client::WsClient, -}; +use jsonrpsee::proc_macros::rpc; use pallet_transaction_payment_rpc_runtime_api::FeeDetails; use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ @@ -60,6 +54,20 @@ pub(crate) trait SubstrateChain { /// Return signed block (with justifications) by its hash. #[method(name = "getBlock")] async fn block(&self, block_hash: Option) -> RpcResult; + /// Subscribe to best headers. + #[subscription( + name = "subscribeNewHeads" => "newHead", + unsubscribe = "unsubscribeNewHeads", + item = C::Header + )] + async fn subscribe_new_heads(&self); + /// Subscribe to finalized headers. + #[subscription( + name = "subscribeFinalizedHeads" => "finalizedHead", + unsubscribe = "unsubscribeFinalizedHeads", + item = C::Header + )] + async fn subscribe_finalized_heads(&self); } /// RPC methods of Substrate `author` namespace, that we are using. @@ -106,15 +114,6 @@ pub(crate) trait SubstrateState { ) -> RpcResult>; } -/// RPC methods that we are using for a certain finality gadget. -#[async_trait] -pub trait SubstrateFinalityClient { - /// Subscribe to finality justifications. - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError>; -} - /// RPC methods of Substrate `grandpa` namespace, that we are using. #[rpc(client, client_bounds(C: ChainWithGrandpa), namespace = "grandpa")] pub(crate) trait SubstrateGrandpa { @@ -123,17 +122,6 @@ pub(crate) trait SubstrateGrandpa { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `grandpa` namespace, that we are using. -pub struct SubstrateGrandpaFinalityClient; -#[async_trait] -impl SubstrateFinalityClient for SubstrateGrandpaFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateGrandpaClient::::subscribe_justifications(client).await - } -} - // TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged /// RPC methods of Substrate `beefy` namespace, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "beefy")] @@ -143,18 +131,6 @@ pub(crate) trait SubstrateBeefy { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `beefy` namespace, that we are using. -pub struct SubstrateBeefyFinalityClient; -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -#[async_trait] -impl SubstrateFinalityClient for SubstrateBeefyFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateBeefyClient::::subscribe_justifications(client).await - } -} - /// RPC methods of Substrate `system` frame pallet, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "system")] pub(crate) trait SubstrateFrameSystem { diff --git a/bridges/relays/client-substrate/src/client/subscription.rs b/bridges/relays/client-substrate/src/client/subscription.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f08097cb583a57a28885a7aa59a1731e997d023 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/subscription.rs @@ -0,0 +1,238 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Result as ClientResult; + +use async_std::{ + channel::{bounded, Receiver, Sender}, + stream::StreamExt, +}; +use futures::{FutureExt, Stream}; +use sp_runtime::DeserializeOwned; +use std::{ + fmt::Debug, + pin::Pin, + result::Result as StdResult, + task::{Context, Poll}, +}; + +/// Once channel reaches this capacity, the subscription breaks. +const CHANNEL_CAPACITY: usize = 128; + +/// Structure describing a stream. +#[derive(Clone)] +pub struct StreamDescription { + stream_name: String, + chain_name: String, +} + +impl StreamDescription { + /// Create a new instance of `StreamDescription`. + pub fn new(stream_name: String, chain_name: String) -> Self { + Self { stream_name, chain_name } + } + + /// Get a stream description. + fn get(&self) -> String { + format!("{} stream of {}", self.stream_name, self.chain_name) + } +} + +/// Chainable stream that transforms items of type `Result` to items of type `T`. +/// +/// If it encounters an item of type `Err`, it returns `Poll::Ready(None)` +/// and terminates the underlying stream. +struct Unwrap>, T, E> { + desc: StreamDescription, + stream: Option, +} + +impl>, T, E> Unwrap { + /// Create a new instance of `Unwrap`. + pub fn new(desc: StreamDescription, stream: S) -> Self { + Self { desc, stream: Some(stream) } + } +} + +impl> + Unpin, T: DeserializeOwned, E: Debug> Stream + for Unwrap +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(match self.stream.as_mut() { + Some(subscription) => match futures::ready!(Pin::new(subscription).poll_next(cx)) { + Some(Ok(item)) => Some(item), + Some(Err(e)) => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned error: {:?}. It may need to be restarted", + self.desc.get(), + e, + ); + None + }, + None => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned `None`. It may need to be restarted", + self.desc.get() + ); + None + }, + }, + None => None, + }) + } +} + +/// Subscription factory that produces subscriptions, sharing the same background thread. +#[derive(Clone)] +pub struct SubscriptionBroadcaster { + desc: StreamDescription, + subscribers_sender: Sender>, +} + +impl SubscriptionBroadcaster { + /// Create new subscription factory. + pub fn new(subscription: Subscription) -> StdResult> { + // It doesn't make sense to further broadcast a broadcasted subscription. + if subscription.is_broadcasted { + return Err(subscription) + } + + let desc = subscription.desc().clone(); + let (subscribers_sender, subscribers_receiver) = bounded(CHANNEL_CAPACITY); + async_std::task::spawn(background_worker(subscription, subscribers_receiver)); + Ok(Self { desc, subscribers_sender }) + } + + /// Produce new subscription. 
+ pub async fn subscribe(&self) -> ClientResult> { + let (items_sender, items_receiver) = bounded(CHANNEL_CAPACITY); + self.subscribers_sender.try_send(items_sender)?; + + Ok(Subscription::new_broadcasted(self.desc.clone(), items_receiver)) + } +} + +/// Subscription to some chain events. +pub struct Subscription { + desc: StreamDescription, + subscription: Box + Unpin + Send>, + is_broadcasted: bool, +} + +impl Subscription { + /// Create new forwarded subscription. + pub fn new_forwarded( + desc: StreamDescription, + subscription: impl Stream> + Unpin + Send + 'static, + ) -> Self { + Self { + desc: desc.clone(), + subscription: Box::new(Unwrap::new(desc, subscription)), + is_broadcasted: false, + } + } + + /// Create new broadcasted subscription. + pub fn new_broadcasted( + desc: StreamDescription, + subscription: impl Stream + Unpin + Send + 'static, + ) -> Self { + Self { desc, subscription: Box::new(subscription), is_broadcasted: true } + } + + /// Get the description of the underlying stream + pub fn desc(&self) -> &StreamDescription { + &self.desc + } +} + +impl Stream for Subscription { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(futures::ready!(Pin::new(&mut self.subscription).poll_next(cx))) + } +} + +/// Background worker that is executed in tokio context as `jsonrpsee` requires. +/// +/// This task may exit under some circumstances. It'll send the correspondent +/// message (`Err` or `None`) to all known listeners. Also, when it stops, all +/// subsequent reads and new subscribers will get the connection error (`ChannelError`). +async fn background_worker( + mut subscription: Subscription, + mut subscribers_receiver: Receiver>, +) { + fn log_task_exit(desc: &StreamDescription, reason: &str) { + log::debug!( + target: "bridge", + "Background task of subscription broadcaster for {} has stopped: {}", + desc.get(), + reason, + ); + } + + // wait for first subscriber until actually starting subscription + let subscriber = match subscribers_receiver.next().await { + Some(subscriber) => subscriber, + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + }; + + // actually subscribe + let mut subscribers = vec![subscriber]; + + // start listening for new items and receivers + loop { + futures::select! { + subscriber = subscribers_receiver.next().fuse() => { + match subscriber { + Some(subscriber) => subscribers.push(subscriber), + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + } + }, + maybe_item = subscription.subscription.next().fuse() => { + match maybe_item { + Some(item) => { + // notify subscribers + subscribers.retain(|subscriber| { + let send_result = subscriber.try_send(item.clone()); + send_result.is_ok() + }); + } + None => { + // The underlying client has dropped, so we can't do anything here + // and need to stop the task. 
+ return log_task_exit(subscription.desc(), "stream has finished"); + } + } + }, + } + } +} diff --git a/bridges/relays/client-substrate/src/client/traits.rs b/bridges/relays/client-substrate/src/client/traits.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f4ef5aa951062ddd6586e0dadeb4ce7425eca5e --- /dev/null +++ b/bridges/relays/client-substrate/src/client/traits.rs @@ -0,0 +1,234 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::{ + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, +}; + +use async_trait::async_trait; +use bp_runtime::{StorageDoubleMapKeyProvider, StorageMapKeyProvider}; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::fmt::Debug; + +/// Relay uses the `Client` to communicate with the node, connected to Substrate +/// chain `C`. +#[async_trait] +pub trait Client: 'static + Send + Sync + Clone + Debug { + /// Returns error if client has no connected peers or it believes it is far + /// behind the chain tip. + async fn ensure_synced(&self) -> Result<()>; + /// Reconnects the client. + async fn reconnect(&self) -> Result<()>; + + /// Return hash of the genesis block. + fn genesis_hash(&self) -> HashOf; + /// Get header hash by number. + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result>; + /// Get header by hash. + async fn header_by_hash(&self, hash: HashOf) -> Result>; + /// Get header by number. + async fn header_by_number(&self, number: BlockNumberOf) -> Result> { + self.header_by_hash(self.header_hash_by_number(number).await?).await + } + /// Get block by hash. + async fn block_by_hash(&self, hash: HashOf) -> Result>; + + /// Get best finalized header hash. + async fn best_finalized_header_hash(&self) -> Result>; + /// Get best finalized header number. + async fn best_finalized_header_number(&self) -> Result> { + Ok(*self.best_finalized_header().await?.number()) + } + /// Get best finalized header. + async fn best_finalized_header(&self) -> Result> { + self.header_by_hash(self.best_finalized_header_hash().await?).await + } + + /// Get best header. + async fn best_header(&self) -> Result>; + /// Get best header hash. + async fn best_header_hash(&self) -> Result> { + Ok(self.best_header().await?.hash()) + } + + /// Subscribe to new best headers. + async fn subscribe_best_headers(&self) -> Result>>; + /// Subscribe to new finalized headers. 
+ async fn subscribe_finalized_headers(&self) -> Result>>; + + /// Subscribe to GRANDPA finality justifications. + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa; + /// Generates a proof of key ownership for the given authority in the given set. + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result>; + + /// Subscribe to BEEFY finality justifications. + async fn subscribe_beefy_finality_justifications(&self) -> Result>; + + /// Return `tokenDecimals` property from the set of chain properties. + async fn token_decimals(&self) -> Result>; + /// Get runtime version of the connected chain. + async fn runtime_version(&self) -> Result; + /// Get partial runtime version, to use when signing transactions. + async fn simple_runtime_version(&self) -> Result; + /// Returns `true` if version guard can be started. + /// + /// There's no reason to run version guard when version mode is set to `Auto`. It can + /// lead to relay shutdown when chain is upgraded, even though we have explicitly + /// said that we don't want to shutdown. + fn can_start_version_guard(&self) -> bool; + + /// Read raw value from runtime storage. + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result>; + /// Read and decode value from runtime storage. + async fn storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.raw_storage_value(at, storage_key.clone()) + .await? + .map(|encoded_value| { + T::decode(&mut &encoded_value.0[..]).map_err(|e| { + Error::failed_to_read_storage_value::(at, storage_key, e.into()) + }) + }) + .transpose() + } + /// Read and decode value from runtime storage map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage map. + async fn storage_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + storage_key: &T::Key, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, storage_key)).await + } + /// Read and decode value from runtime storage double map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage double map. + async fn storage_double_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + key1: &T::Key1, + key2: &T::Key2, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, key1, key2)).await + } + + /// Returns pending extrinsics from transaction pool. + async fn pending_extrinsics(&self) -> Result>; + /// Submit unsigned extrinsic for inclusion in a block. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result>; + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. 
+ async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status + /// after submission. + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Validate transaction at given block. + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + /// Returns weight of the given transaction. + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + + /// Execute runtime call at given block. + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result; + /// Execute runtime call at given block, provided the input and output types. + /// It also performs the input encode and output decode. + async fn state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = arguments.encode(); + let encoded_output = self.raw_state_call(at, method.clone(), arguments).await?; + Ret::decode(&mut &encoded_output.0[..]).map_err(|e| { + Error::failed_state_call::(at, method, Bytes(encoded_arguments), e.into()) + }) + } + + /// Returns storage proof of given storage keys and state root. + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)>; +} diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 2133c18887846b4f4360bdb6baa34799a24e6164..ee3c73f806e65362a10185d5dd9090f5bbc4c300 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -16,13 +16,13 @@ //! Substrate node RPC errors. -use crate::SimpleRuntimeVersion; +use crate::{BlockNumberOf, Chain, HashOf, SimpleRuntimeVersion}; use bp_header_chain::SubmitFinalityProofCallExtras; use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; -use sp_core::storage::StorageKey; +use sp_core::{storage::StorageKey, Bytes}; use sp_runtime::transaction_validity::TransactionValidityError; use thiserror::Error; @@ -43,12 +43,10 @@ pub enum Error { /// The response from the server could not be SCALE decoded. #[error("Response parse failed: {0}")] ResponseParseFailed(#[from] codec::Error), - /// Account does not exist on the chain. - #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing some mandatory value. - #[error("Mandatory storage value is missing from the runtime storage.")] - MissingMandatoryStorageValue, + /// Internal channel error - communication channel is either closed, or full. + /// It can be solved with reconnect. + #[error("Internal communication channel error: {0:?}.")] + ChannelError(String), /// Required parachain head is not present at the relay chain. 
#[error("Parachain {0:?} head {1} is missing from the relay chain storage.")] MissingRequiredParachainHead(ParaId, u64), @@ -58,6 +56,14 @@ pub enum Error { /// The client we're connected to is not synced, so we can't rely on its state. #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), + /// Failed to get system health. + #[error("Failed to get system health of {chain} node: {error:?}.")] + FailedToGetSystemHealth { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, /// Failed to read best finalized header hash from given chain. #[error("Failed to read best finalized header hash of {chain}: {error:?}.")] FailedToReadBestFinalizedHeaderHash { @@ -74,6 +80,16 @@ pub enum Error { /// Underlying error. error: Box, }, + /// Failed to read header hash by number from given chain. + #[error("Failed to read header hash by number {number} of {chain}: {error:?}.")] + FailedToReadHeaderHashByNumber { + /// Name of the chain where the error has happened. + chain: String, + /// Number of the header we've tried to read. + number: String, + /// Underlying error. + error: Box, + }, /// Failed to read header by hash from given chain. #[error("Failed to read header {hash} of {chain}: {error:?}.")] FailedToReadHeaderByHash { @@ -84,38 +100,119 @@ pub enum Error { /// Underlying error. error: Box, }, - /// Failed to execute runtime call at given chain. - #[error("Failed to execute runtime call {method} at {chain}: {error:?}.")] - ErrorExecutingRuntimeCall { + /// Failed to read block by hash from given chain. + #[error("Failed to read block {hash} of {chain}: {error:?}.")] + FailedToReadBlockByHash { /// Name of the chain where the error has happened. chain: String, - /// Runtime method name. - method: String, + /// Hash of the header we've tried to read. + hash: String, /// Underlying error. error: Box, }, /// Failed to read sotrage value at given chain. #[error("Failed to read storage value {key:?} at {chain}: {error:?}.")] - FailedToReadRuntimeStorageValue { + FailedToReadStorageValue { /// Name of the chain where the error has happened. chain: String, + /// Hash of the block we've tried to read value from. + hash: String, /// Runtime storage key key: StorageKey, /// Underlying error. error: Box, }, + /// Failed to read runtime version of given chain. + #[error("Failed to read runtime version of {chain}: {error:?}.")] + FailedToReadRuntimeVersion { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to get pending extrinsics. + #[error("Failed to get pending extrinsics of {chain}: {error:?}.")] + FailedToGetPendingExtrinsics { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to submit transaction. + #[error("Failed to submit {chain} transaction: {error:?}.")] + FailedToSubmitTransaction { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Runtime call has failed. + #[error("Runtime call {method} with arguments {arguments:?} of chain {chain} at {hash} has failed: {error:?}.")] + FailedStateCall { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to call at. + hash: String, + /// Runtime API method. + method: String, + /// Encoded method arguments. + arguments: Bytes, + /// Underlying error. + error: Box, + }, + /// Failed to prove storage keys. 
+ #[error("Failed to prove storage keys {storage_keys:?} of {chain} at {hash}: {error:?}.")] + FailedToProveStorage { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to prove keys at. + hash: String, + /// Storage keys we have tried to prove. + storage_keys: Vec, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} best headers: {error:?}.")] + FailedToSubscribeBestHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} finalized headers: {error:?}.")] + FailedToSubscribeFinalizedHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} justifications: {error:?}.")] + FailedToSubscribeJustifications { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Headers of the chain are finalized out of order. Maybe chain has been + /// restarted? + #[error("Finalized headers of {chain} are unordered: previously finalized {prev_number} vs new {next_number}")] + UnorderedFinalizedHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Previously finalized header number. + prev_number: String, + /// New finalized header number. + next_number: String, + }, /// The bridge pallet is halted and all transactions will be rejected. #[error("Bridge pallet is halted.")] BridgePalletIsHalted, /// The bridge pallet is not yet initialized and all transactions will be rejected. #[error("Bridge pallet is not initialized.")] BridgePalletIsNotInitialized, - /// There's no best head of the parachain at the `pallet-bridge-parachains` at the target side. - #[error("No head of the ParaId({0}) at the bridge parachains pallet at {1}.")] - NoParachainHeadAtTarget(u32, String), - /// An error has happened when we have tried to parse storage proof. - #[error("Error when parsing storage proof: {0:?}.")] - StorageProofError(bp_runtime::StorageProofError), /// The Substrate transaction is invalid. #[error("Substrate transaction is invalid: {0:?}")] TransactionInvalid(#[from] TransactionValidityError), @@ -143,7 +240,19 @@ pub enum Error { impl From for Error { fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {error}")) + Error::ChannelError(format!("failed to wait tokio task: {error}")) + } +} + +impl From> for Error { + fn from(error: async_std::channel::TrySendError) -> Self { + Error::ChannelError(format!("`try_send` has failed: {error:?}")) + } +} + +impl From for Error { + fn from(error: async_std::channel::RecvError) -> Self { + Error::ChannelError(format!("`recv` has failed: {error:?}")) } } @@ -152,21 +261,170 @@ impl Error { pub fn boxed(self) -> Box { Box::new(self) } + + /// Returns nested error reference. + pub fn nested(&self) -> Option<&Self> { + match *self { + Self::FailedToReadBestFinalizedHeaderHash { ref error, .. } => Some(&**error), + Self::FailedToReadBestHeader { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderHashByNumber { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderByHash { ref error, .. 
} => Some(&**error), + Self::FailedToReadBlockByHash { ref error, .. } => Some(&**error), + Self::FailedToReadStorageValue { ref error, .. } => Some(&**error), + Self::FailedToReadRuntimeVersion { ref error, .. } => Some(&**error), + Self::FailedToGetPendingExtrinsics { ref error, .. } => Some(&**error), + Self::FailedToSubmitTransaction { ref error, .. } => Some(&**error), + Self::FailedStateCall { ref error, .. } => Some(&**error), + Self::FailedToProveStorage { ref error, .. } => Some(&**error), + Self::FailedToGetSystemHealth { ref error, .. } => Some(&**error), + Self::FailedToSubscribeBestHeaders { ref error, .. } => Some(&**error), + Self::FailedToSubscribeFinalizedHeaders { ref error, .. } => Some(&**error), + Self::FailedToSubscribeJustifications { ref error, .. } => Some(&**error), + _ => None, + } + } + + /// Constructs `FailedToReadHeaderHashByNumber` variant. + pub fn failed_to_read_header_hash_by_number( + number: BlockNumberOf, + e: Error, + ) -> Self { + Error::FailedToReadHeaderHashByNumber { + chain: C::NAME.into(), + number: format!("{number}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadHeaderByHash` variant. + pub fn failed_to_read_header_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBlockByHash` variant. + pub fn failed_to_read_block_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBestFinalizedHeaderHash` variant. + pub fn failed_to_read_best_finalized_header_hash(e: Error) -> Self { + Error::FailedToReadBestFinalizedHeaderHash { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadBestHeader` variant. + pub fn failed_to_read_best_header(e: Error) -> Self { + Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadRuntimeVersion` variant. + pub fn failed_to_read_runtime_version(e: Error) -> Self { + Error::FailedToReadRuntimeVersion { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadStorageValue` variant. + pub fn failed_to_read_storage_value( + at: HashOf, + key: StorageKey, + e: Error, + ) -> Self { + Error::FailedToReadStorageValue { + chain: C::NAME.into(), + hash: format!("{at}"), + key, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetPendingExtrinsics` variant. + pub fn failed_to_get_pending_extrinsics(e: Error) -> Self { + Error::FailedToGetPendingExtrinsics { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubmitTransaction` variant. + pub fn failed_to_submit_transaction(e: Error) -> Self { + Error::FailedToSubmitTransaction { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedStateCall` variant. + pub fn failed_state_call( + at: HashOf, + method: String, + arguments: Bytes, + e: Error, + ) -> Self { + Error::FailedStateCall { + chain: C::NAME.into(), + hash: format!("{at}"), + method, + arguments, + error: e.boxed(), + } + } + + /// Constructs `FailedToProveStorage` variant. + pub fn failed_to_prove_storage( + at: HashOf, + storage_keys: Vec, + e: Error, + ) -> Self { + Error::FailedToProveStorage { + chain: C::NAME.into(), + hash: format!("{at}"), + storage_keys, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetSystemHealth` variant. 
+ pub fn failed_to_get_system_health(e: Error) -> Self { + Error::FailedToGetSystemHealth { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeBestHeaders` variant. + pub fn failed_to_subscribe_best_headers(e: Error) -> Self { + Error::FailedToSubscribeBestHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeFinalizedHeaders` variant. + pub fn failed_to_subscribe_finalized_headers(e: Error) -> Self { + Error::FailedToSubscribeFinalizedHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeJustifications` variant. + pub fn failed_to_subscribe_justification(e: Error) -> Self { + Error::FailedToSubscribeJustifications { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `Un` + pub fn unordered_finalized_headers( + prev_number: BlockNumberOf, + next_number: BlockNumberOf, + ) -> Self { + Error::UnorderedFinalizedHeaders { + chain: C::NAME.into(), + prev_number: format!("{}", prev_number), + next_number: format!("{}", next_number), + } + } } impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { match *self { - Error::RpcError(RpcError::Transport(_)) | - Error::RpcError(RpcError::RestartNeeded(_)) | + Error::ChannelError(_) => true, + Error::RpcError(ref e) => + matches!(*e, RpcError::Transport(_) | RpcError::RestartNeeded(_),), Error::ClientNotSynced(_) => true, - Error::FailedToReadBestFinalizedHeaderHash { ref error, .. } => - error.is_connection_error(), - Error::FailedToReadBestHeader { ref error, .. } => error.is_connection_error(), - Error::FailedToReadHeaderByHash { ref error, .. } => error.is_connection_error(), - Error::ErrorExecutingRuntimeCall { ref error, .. } => error.is_connection_error(), - Error::FailedToReadRuntimeStorageValue { ref error, .. } => error.is_connection_error(), - _ => false, + Error::UnorderedFinalizedHeaders { .. 
} => true, + _ => self.nested().map(|e| e.is_connection_error()).unwrap_or(false), } } } diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs index 47454892cd039f40e92a8f25db435698360bde9b..3dbf95bff8e10de452992a0f3d42befbe058dd74 100644 --- a/bridges/relays/client-substrate/src/guard.rs +++ b/bridges/relays/client-substrate/src/guard.rs @@ -98,7 +98,7 @@ fn conditions_check_delay() -> Duration { } #[async_trait] -impl Environment for Client { +impl> Environment for Clnt { type Error = Error; async fn runtime_version(&mut self) -> Result { diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs index d5b8d4dcced2d8b2e1883b6779905c536eec49f1..12a1c48c09c7ad59d05c0e40a578bc71f7575b35 100644 --- a/bridges/relays/client-substrate/src/lib.rs +++ b/bridges/relays/client-substrate/src/lib.rs @@ -21,7 +21,6 @@ mod chain; mod client; mod error; -mod rpc; mod sync_header; mod transaction_tracker; @@ -37,14 +36,15 @@ pub use crate::{ AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, FullRuntimeUtilityPallet, MockedRuntimeUtilityPallet, Parachain, - RelayChain, SignParam, TransactionStatusOf, UnsignedTransaction, UtilityPallet, + RelayChain, SignParam, SignedBlockOf, TransactionStatusOf, UnsignedTransaction, + UtilityPallet, }, client::{ - is_ancient_block, ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, - SimpleRuntimeVersion, Subscription, ANCIENT_BLOCK_THRESHOLD, + is_ancient_block, rpc_with_caching as new, ChainRuntimeVersion, Client, + OpaqueGrandpaAuthoritiesSet, RpcWithCachingClient, SimpleRuntimeVersion, StreamDescription, + Subscription, ANCIENT_BLOCK_THRESHOLD, }, error::{Error, Result}, - rpc::{SubstrateBeefyFinalityClient, SubstrateFinalityClient, SubstrateGrandpaFinalityClient}, sync_header::SyncHeader, transaction_tracker::TransactionTracker, }; diff --git a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs index 7bb92693b38d27f42b623d323ba3e7ced8ebbda2..27c9d8cd7a8b68d4ad04ff4ab00f89f408f96a28 100644 --- a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::{chain::Chain, client::Client, Error as SubstrateError}; +use crate::{Chain, Client, Error as SubstrateError}; use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; @@ -66,20 +66,20 @@ impl FloatStorageValue for FixedU128OrOne { /// Metric that represents fixed-point runtime storage value as float gauge. #[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { +pub struct FloatStorageValueMetric { value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, metric: Gauge, shared_value_ref: F64SharedRef, - _phantom: PhantomData, + _phantom: PhantomData<(C, V)>, } -impl FloatStorageValueMetric { +impl FloatStorageValueMetric { /// Create new metric. 
pub fn new( value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, name: String, help: String, @@ -101,32 +101,39 @@ impl FloatStorageValueMetric { } } -impl Metric for FloatStorageValueMetric { +impl, V: FloatStorageValue> Metric + for FloatStorageValueMetric +{ fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { register(self.metric.clone(), registry).map(drop) } } #[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { +impl, V: FloatStorageValue> StandaloneMetric + for FloatStorageValueMetric +{ fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS } async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) + let value = async move { + let best_header_hash = self.client.best_header_hash().await?; + let maybe_storage_value = self + .client + .raw_storage_value(best_header_hash, self.storage_key.clone()) + .await?; + self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { + maybe_fixed_point_value.map(|fixed_point_value| { + fixed_point_value.into_inner().unique_saturated_into() as f64 / + V::Value::DIV.unique_saturated_into() as f64 }) }) - .map_err(|e| e.to_string()); + } + .await + .map_err(|e| e.to_string()); + relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); *self.shared_value_ref.write().await = value.ok().and_then(|x| x); } diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index cfd241c022a269da799e8e03c4398566d98a14a0..991202e9874c790e404b42b74102248cb5f6723f 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -24,7 +24,7 @@ use crate::{Chain, ChainWithBalances, ChainWithMessages}; use bp_messages::{ChainWithMessages as ChainWithMessagesBase, MessageNonce}; use bp_runtime::ChainId; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; use std::time::Duration; /// Chain that may be used in tests. @@ -44,6 +44,8 @@ impl bp_runtime::Chain for TestChain { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 100000 } @@ -100,6 +102,8 @@ impl bp_runtime::Chain for TestParachainBase { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index b181a945c2c15393daf821901b298e81214f85e3..b4801c89f51e1b1d9fd31373cdafb5b4770037cc 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -16,7 +16,7 @@ //! Helper for tracking transaction invalidation events. 
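The rewritten `update()` above first resolves the best header hash, reads the raw storage value at that block, and only then converts the decoded fixed-point value into the float exposed by the gauge. The conversion itself is a division by the fixed-point type's accuracy constant; a dependency-free sketch, assuming the `FixedU128` accuracy of 10^18 (`FixedU128::DIV`):

```rust
/// Accuracy of `FixedU128` (`FixedU128::DIV`): values are stored as `inner / 10^18`.
const DIV: u128 = 1_000_000_000_000_000_000;

/// Sketch of the conversion performed in `update()` above: the decoded
/// fixed-point inner value becomes the float reported to Prometheus.
fn fixed_point_to_float(inner: u128) -> f64 {
    inner as f64 / DIV as f64
}

fn main() {
    // A conversion rate of 1.5 encoded as a `FixedU128` inner value.
    let inner = 1_500_000_000_000_000_000u128;
    assert!((fixed_point_to_float(inner) - 1.5).abs() < 1e-9);
}
```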
-use crate::{Chain, Client, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; +use crate::{Chain, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; use async_trait::async_trait; use futures::{future::Either, Future, FutureExt, Stream, StreamExt}; @@ -31,8 +31,10 @@ pub trait Environment: Send + Sync { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error>; } +// TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove `Environment` trait +// after test client is implemented #[async_trait] -impl Environment for Client { +impl> Environment for T { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error> { self.header_by_hash(hash).await.map(|h| HeaderId(*h.number(), hash)) } @@ -76,6 +78,21 @@ impl> TransactionTracker { Self { environment, stall_timeout, transaction_hash, subscription } } + // TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove me after + // test client is implemented + /// Converts self into tracker with different environment. + pub fn switch_environment>( + self, + environment: NewE, + ) -> TransactionTracker { + TransactionTracker { + environment, + stall_timeout: self.stall_timeout, + transaction_hash: self.transaction_hash, + subscription: self.subscription, + } + } + /// Wait for final transaction status and return it along with last known internal invalidation /// status. async fn do_wait( @@ -88,7 +105,7 @@ impl> TransactionTracker { let wait_for_invalidation = watch_transaction_status::<_, C, _>( self.environment, self.transaction_hash, - self.subscription.into_stream(), + self.subscription, ); futures::pin_mut!(wait_for_stall_timeout, wait_for_invalidation); @@ -284,7 +301,7 @@ async fn watch_transaction_status< #[cfg(test)] mod tests { use super::*; - use crate::test_chain::TestChain; + use crate::{test_chain::TestChain, StreamDescription}; use futures::{FutureExt, SinkExt}; use sc_transaction_pool_api::TransactionStatus; @@ -306,22 +323,27 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); - let wait_for_stall_timeout = futures::future::pending(); + // we can't do `.now_or_never()` on `do_wait()` call, because `Subscription` has its own + // background thread, which may cause additional async task switches => let's leave some + // relatively small timeout here + let wait_for_stall_timeout = async_std::task::sleep(std::time::Duration::from_millis(100)); let wait_for_stall_timeout_rest = futures::future::ready(()); - sender.send(Some(status)).await.unwrap(); - tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never() - .map(|(ts, is)| (ts, is.unwrap())) + sender.send(Ok(status)).await.unwrap(); + + let (ts, is) = + tx_tracker.do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest).await; + is.map(|is| (ts, is)) } #[async_std::test] @@ -429,13 +451,15 @@ mod tests { #[async_std::test] async fn lost_on_timeout_when_waiting_for_invalidation_status() { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = 
futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); diff --git a/bridges/relays/equivocation/Cargo.toml b/bridges/relays/equivocation/Cargo.toml index 5a067b62e0774ffb93e8b935ed287696e1fefd7c..09bdda23f2c25edabc5c4adbf6fa6739b99ddeef 100644 --- a/bridges/relays/equivocation/Cargo.toml +++ b/bridges/relays/equivocation/Cargo.toml @@ -12,12 +12,12 @@ publish = false workspace = true [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -bp-header-chain = { path = "../../primitives/header-chain" } -finality-relay = { path = "../finality" } -frame-support = { path = "../../../substrate/frame/support" } -futures = "0.3.30" +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +bp-header-chain = { workspace = true, default-features = true } +finality-relay = { workspace = true } +frame-support = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } +num-traits = { workspace = true, default-features = true } +relay-utils = { workspace = true } diff --git a/bridges/relays/finality/Cargo.toml b/bridges/relays/finality/Cargo.toml index 5ee4b10fa638f5ec226df14beca1e0c79d0055df..06c4a5dcc43e0d54410424a85e2ff72dbbf24729 100644 --- a/bridges/relays/finality/Cargo.toml +++ b/bridges/relays/finality/Cargo.toml @@ -12,14 +12,14 @@ publish = false workspace = true [dependencies] -async-std = "1.9.0" -async-trait = "0.1.79" -backoff = "0.4" -bp-header-chain = { path = "../../primitives/header-chain" } -futures = "0.3.30" +async-std = { workspace = true } +async-trait = { workspace = true } +backoff = { workspace = true } +bp-header-chain = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } +num-traits = { workspace = true, default-features = true } +relay-utils = { workspace = true } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } diff --git a/bridges/relays/finality/src/base.rs b/bridges/relays/finality/src/base.rs index 4253468eaace1ef2a2adc47790f7e16c38160200..8704bff95494a88274f0e78f5df53aa15708cccf 100644 --- a/bridges/relays/finality/src/base.rs +++ b/bridges/relays/finality/src/base.rs @@ -45,7 +45,3 @@ pub trait SourceClientBase: RelayClient { /// Subscribe to new finality proofs. async fn finality_proofs(&self) -> Result; } - -/// Target client used in finality related loops. 
-#[async_trait] -pub trait TargetClientBase: RelayClient {} diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index 077d1b1ff356a871364d45c1251aec0af7680cdd..b0f93e5b5485f24b230b9b2868c6301b6ed64181 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -11,52 +11,50 @@ publish = false workspace = true [dependencies] -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -hex = "0.4" +anyhow = { workspace = true } +async-std = { workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +hex = { workspace = true, default-features = true } log = { workspace = true } -num-traits = "0.2" -rbtag = "0.3" -structopt = "0.3" -strum = { version = "0.26.2", features = ["derive"] } +num-traits = { workspace = true, default-features = true } +rbtag = { workspace = true } +structopt = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } # Bridge dependencies +bp-header-chain = { workspace = true, default-features = true } +bp-parachains = { workspace = true, default-features = true } +bp-polkadot-core = { workspace = true, default-features = true } +bp-relayers = { workspace = true, default-features = true } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-relayers = { path = "../../primitives/relayers" } -bridge-runtime-common = { path = "../../bin/runtime-common" } +equivocation-detector = { workspace = true } +finality-relay = { workspace = true } +parachains-relay = { workspace = true } +relay-utils = { workspace = true } +messages-relay = { workspace = true } +relay-substrate-client = { workspace = true } -equivocation-detector = { path = "../equivocation" } -finality-grandpa = { version = "0.16.2" } -finality-relay = { path = "../finality" } -parachains-relay = { path = "../parachains" } -relay-utils = { path = "../utils" } -messages-relay = { path = "../messages" } -relay-substrate-client = { path = "../client-substrate" } +pallet-bridge-grandpa = { workspace = true, default-features = true } +pallet-bridge-messages = { workspace = true, default-features = true } +pallet-bridge-parachains = { workspace = true, default-features = true } -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -pallet-bridge-messages = { path = "../../modules/messages" } -pallet-bridge-parachains = { path = "../../modules/parachains" } - -bp-runtime = { path = "../../primitives/runtime" } -bp-messages = { path = "../../primitives/messages" } +bp-runtime = { workspace = true, default-features = true } +bp-messages = { workspace = true, default-features = true } # Substrate Dependencies - -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-grandpa = { path = "../../../substrate/frame/grandpa" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +frame-support = { workspace = true, default-features = true } 
+frame-system = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true } [dev-dependencies] -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } +scale-info = { features = ["derive"], workspace = true } +pallet-transaction-payment = { workspace = true, default-features = true } +relay-substrate-client = { features = ["test-helpers"], workspace = true } diff --git a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs index 316f59a2b0c86e1bc78c1446bb69a90b0b0bf0f7..5631285b3c544de0e3caf85a6b74b7ee31601c56 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs @@ -19,7 +19,7 @@ use crate::{ equivocation::SubstrateEquivocationDetectionPipeline, finality::SubstrateFinalitySyncPipeline, - messages_lane::{MessagesRelayLimits, SubstrateMessageLane}, + messages::{MessagesRelayLimits, SubstrateMessageLane}, parachains::SubstrateParachainsPipeline, }; use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; diff --git a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs index 6246bdbf015152fe0e82ac0a8287692df6a1ace9..d985d35c9e802e694e74264ed3f611f14e8bc0d2 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs @@ -123,11 +123,11 @@ macro_rules! declare_chain_connection_params_cli_schema { #[allow(dead_code)] pub async fn into_client( self, - ) -> anyhow::Result> { + ) -> anyhow::Result<$crate::cli::DefaultClient> { let chain_runtime_version = self .[<$chain_prefix _runtime_version>] .into_runtime_version(Chain::RUNTIME_VERSION)?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + Ok(relay_substrate_client::new(relay_substrate_client::ConnectionParams { uri: self.[<$chain_prefix _uri>], host: self.[<$chain_prefix _host>], port: self.[<$chain_prefix _port>], diff --git a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs index b98e41b2a43e4f88670ac8ed1d7129a0187fecfe..3921685d9e8ad70a9173a87ef19fb567c9263a63 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs @@ -23,7 +23,7 @@ use crate::{ }; use async_trait::async_trait; -use relay_substrate_client::ChainWithTransactions; +use relay_substrate_client::{ChainWithTransactions, Client}; use structopt::StructOpt; /// Start equivocation detection loop. diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index 270608bf6ed8e1500d10000173bd7945a31c8135..ddb3e416dc32628b5a85f27f322d0568cbee1c10 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -35,6 +35,11 @@ pub mod relay_parachains; /// The target that will be used when publishing logs related to this pallet. 
pub const LOG_TARGET: &str = "bridge"; +/// Default Substrate client type that we are using. We'll use it all over the glue CLI code +/// to avoid multiple level generic arguments and constraints. We still allow usage of other +/// clients in the **core logic code**. +pub type DefaultClient = relay_substrate_client::RpcWithCachingClient; + /// Lane id. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HexLaneId(pub [u8; 4]); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 093f98ef21ed24b40a0c2d8f217d84b841137a69..ea92a0c9acce15a7f5000d0dd7e0253bbafc7844 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -29,6 +29,7 @@ use crate::{ finality::SubstrateFinalitySyncPipeline, HeadersToRelay, }; +use relay_substrate_client::Client; /// Chain headers relaying params. #[derive(StructOpt)] diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index a796df6721b8c8afd7f401f92e2fca6afcb41b02..338dda3c63309acbefd2616d052ae5dc4bf1d1e0 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -37,8 +37,8 @@ use structopt::StructOpt; use futures::{FutureExt, TryFutureExt}; use crate::{ - cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, - messages_lane::{MessagesRelayLimits, MessagesRelayParams}, + cli::{bridge::MessagesCliBridge, DefaultClient, HexLaneId, PrometheusParams}, + messages::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, HeadersToRelay, TaggedAccount, TransactionParams, }; @@ -46,7 +46,7 @@ use bp_messages::LaneId; use bp_runtime::BalanceOf; use relay_substrate_client::{ AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, - ChainWithRuntimeVersion, ChainWithTransactions, Client, + ChainWithRuntimeVersion, ChainWithTransactions, }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; @@ -118,7 +118,7 @@ impl< /// Parameters that are associated with one side of the bridge. pub struct BridgeEndCommonParams { /// Chain client. - pub client: Client, + pub client: DefaultClient, /// Params used for sending transactions to the chain. pub tx_params: TransactionParams>, /// Accounts, which balances are exposed as metrics by the relay process. 
@@ -165,7 +165,7 @@ where target_to_source_headers_relay: Arc>, lane_id: LaneId, maybe_limits: Option, - ) -> MessagesRelayParams { + ) -> MessagesRelayParams, DefaultClient> { MessagesRelayParams { source_client: self.source.client.clone(), source_transaction_params: self.source.tx_params.clone(), @@ -298,14 +298,14 @@ where .collect::>(); { let common = self.mut_base().mut_common(); - crate::messages_metrics::add_relay_balances_metrics::<_, Self::Right>( + crate::messages::metrics::add_relay_balances_metrics::<_, Self::Right>( common.left.client.clone(), &common.metrics_params, &common.left.accounts, &lanes, ) .await?; - crate::messages_metrics::add_relay_balances_metrics::<_, Self::Left>( + crate::messages::metrics::add_relay_balances_metrics::<_, Self::Left>( common.right.client.clone(), &common.metrics_params, &common.right.accounts, @@ -317,28 +317,30 @@ where // Need 2x capacity since we consider both directions for each lane let mut message_relays = Vec::with_capacity(lanes.len() * 2); for lane in lanes { - let left_to_right_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.left_to_right().messages_relay_params( - left_to_right_on_demand_headers.clone(), - right_to_left_on_demand_headers.clone(), - lane, - Self::L2R::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let left_to_right_messages = + crate::messages::run::<::MessagesLane, _, _>( + self.left_to_right().messages_relay_params( + left_to_right_on_demand_headers.clone(), + right_to_left_on_demand_headers.clone(), + lane, + Self::L2R::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(left_to_right_messages); - let right_to_left_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.right_to_left().messages_relay_params( - right_to_left_on_demand_headers.clone(), - left_to_right_on_demand_headers.clone(), - lane, - Self::R2L::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let right_to_left_messages = + crate::messages::run::<::MessagesLane, _, _>( + self.right_to_left().messages_relay_params( + right_to_left_on_demand_headers.clone(), + left_to_right_on_demand_headers.clone(), + lane, + Self::R2L::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(right_to_left_messages); } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 7f6f40777823679c97577f1244eb9a860948d267..8104be7af807a67bbc001e70e24565c81b6beb17 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -23,6 +23,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -52,9 +53,9 @@ pub struct ParachainToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the left relay chain. - pub left_relay: Client<::SourceRelay>, + pub left_relay: DefaultClient<::SourceRelay>, /// Client of the right relay chain. 
- pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to parachain-to-parachain relayer. @@ -175,25 +176,33 @@ where ) .await?; - let left_relay_to_right_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let left_relay_to_right_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.left_relay.clone(), + self.common.right.client.clone(), + self.common.right.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let left_to_right_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.left_relay.clone(), self.common.right.client.clone(), @@ -202,6 +211,8 @@ where ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index 5911fe49df4adfc955cbab4d142998fbc7ed4d22..6c078973fedc08a724e51808f2cb47f3a64ca1a1 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -26,6 +26,7 @@ use crate::{ RelayToRelayHeadersCliBridge, }, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -54,7 +55,7 @@ pub struct RelayToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to relay-to-parachain relayer. 
@@ -167,23 +168,28 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), self.common.shared.headers_to_relay(), None, ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index 832df4ae4003ced1715d7b9d495989d9163417d5..3f8c8bb40c99c7218cdc69a2524ec5dd113e621f 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -32,7 +32,7 @@ use crate::{ on_demand::{headers::OnDemandHeadersRelay, OnDemandRelay}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, Client, }; use sp_core::Pair; @@ -148,7 +148,7 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), @@ -156,7 +156,7 @@ where None, ); let right_to_left_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index 943feba072e408d98360ece228fe8c5558181b69..68bbe71ae599c901bdf7a9b6e55cf93c020adbb6 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -18,7 +18,7 @@ use crate::{ cli::{bridge::*, chain_schema::*, HexLaneId, PrometheusParams}, - messages_lane::MessagesRelayParams, + messages::MessagesRelayParams, TransactionParams, }; @@ -29,7 +29,8 @@ use structopt::StructOpt; use bp_messages::MessageNonce; use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, + ChainWithTransactions, Client, }; use relay_utils::UniqueSaturatedInto; @@ -116,7 +117,7 @@ where let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; - crate::messages_lane::run::(MessagesRelayParams { + 
crate::messages::run::(MessagesRelayParams { source_client, source_transaction_params: TransactionParams { signer: source_sign, @@ -160,7 +161,7 @@ where })? .id(); - crate::messages_lane::relay_messages_range::( + crate::messages::relay_messages_range::( source_client, target_client, TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, @@ -196,7 +197,7 @@ where })? .id(); - crate::messages_lane::relay_messages_delivery_confirmation::( + crate::messages::relay_messages_delivery_confirmation::( source_client, target_client, TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index 00f8cf79ef1fb54577954cf198e7296819591a43..77cd395ff7225e9a0d088cc35cf0f86ab78caf46 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -21,7 +21,7 @@ use async_trait::async_trait; use bp_polkadot_core::BlockNumber as RelayBlockNumber; use bp_runtime::HeaderIdProvider; use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::Parachain; +use relay_substrate_client::{Client, Parachain}; use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use std::sync::Arc; use structopt::StructOpt; @@ -30,7 +30,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, ParachainToRelayHeadersCliBridge}, chain_schema::*, - PrometheusParams, + DefaultClient, PrometheusParams, }, parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, TransactionParams, @@ -72,16 +72,19 @@ pub struct RelayParachainHeadParams { #[async_trait] pub trait ParachainsRelayer: ParachainToRelayHeadersCliBridge where - ParachainsSource: + ParachainsSource>: SourceClient>, - ParachainsTarget: - TargetClient>, + ParachainsTarget< + Self::ParachainFinality, + DefaultClient, + DefaultClient, + >: TargetClient>, ::Source: Parachain, { /// Start relaying parachains finality. async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { let source_chain_client = data.source.into_client::().await?; - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -91,7 +94,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, @@ -121,7 +124,7 @@ where .map_err(|e| anyhow::format_err!("{}", e))? 
.id(); - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -131,7 +134,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs index f6d58cbaa4ab4c4d7f489de5a80ab226b3b475b4..f8077923b82023cc640ddcf131f248ef141853a0 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs @@ -69,7 +69,7 @@ pub trait SubstrateEquivocationDetectionPipeline: /// Add relay guards if required. async fn start_relay_guards( - source_client: &Client, + source_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -199,8 +199,8 @@ macro_rules! generate_report_equivocation_call_builder { /// Run Substrate-to-Substrate equivocations detection loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { @@ -212,8 +212,8 @@ pub async fn run( ); equivocation_detector::run( - SubstrateEquivocationSource::

<P>::new(source_client, source_transaction_params),
- SubstrateEquivocationTarget::<P>
::new(target_client), + SubstrateEquivocationSource::::new(source_client, source_transaction_params), + SubstrateEquivocationTarget::::new(target_client), P::TargetChain::AVERAGE_BLOCK_INTERVAL, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs index a0c7dcf5cbc32c7e5a39de5acd53d92def24a22f..66d651600a1ec72943cb4d13c3e094c5be0a33e3 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs @@ -35,29 +35,35 @@ use relay_substrate_client::{ use relay_utils::relay_loop::Client as RelayClient; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationSource { - client: Client, +pub struct SubstrateEquivocationSource { + client: SourceClnt, transaction_params: TransactionParams>, } -impl SubstrateEquivocationSource

{ +impl> + SubstrateEquivocationSource +{ /// Create new instance of `SubstrateEquivocationSource`. pub fn new( - client: Client, + client: SourceClnt, transaction_params: TransactionParams>, ) -> Self { Self { client, transaction_params } } } -impl Clone for SubstrateEquivocationSource

{ +impl> Clone + for SubstrateEquivocationSource +{ fn clone(&self) -> Self { Self { client: self.client.clone(), transaction_params: self.transaction_params.clone() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationSource

{ +impl> RelayClient + for SubstrateEquivocationSource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -66,8 +72,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - SourceClientBase> for SubstrateEquivocationSource

+impl> + SourceClientBase> + for SubstrateEquivocationSource { type FinalityProofsStream = SubstrateFinalityProofsStream

<P>; @@ -77,10 +84,11 @@ impl } #[async_trait] -impl - SourceClient> for SubstrateEquivocationSource<P>
+impl> + SourceClient> + for SubstrateEquivocationSource { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn report_equivocation( &self, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs index 6eee2ab91d45b033a77e30b7d05ae28b246b9735..7d054e843d0db6c09b5baec551c46bb9fbe0fc34 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs @@ -34,27 +34,33 @@ use sp_runtime::traits::Header; use std::marker::PhantomData; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationTarget { - client: Client, +pub struct SubstrateEquivocationTarget { + client: TargetClnt, _phantom: PhantomData

<P>, } -impl SubstrateEquivocationTarget<P>
{ +impl> + SubstrateEquivocationTarget +{ /// Create new instance of `SubstrateEquivocationTarget`. - pub fn new(client: Client) -> Self { + pub fn new(client: TargetClnt) -> Self { Self { client, _phantom: Default::default() } } } -impl Clone for SubstrateEquivocationTarget

{ +impl> Clone + for SubstrateEquivocationTarget +{ fn clone(&self) -> Self { Self { client: self.client.clone(), _phantom: Default::default() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationTarget

{ +impl> RelayClient + for SubstrateEquivocationTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -63,8 +69,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - TargetClient> for SubstrateEquivocationTarget

+impl> + TargetClient> + for SubstrateEquivocationTarget { async fn best_finalized_header_number( &self, diff --git a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs index 5dde46c39dd674e7c01eebba4b014bc999611eb5..a972f743e117ce280fb2dd5a950581713766a3b6 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs @@ -39,8 +39,8 @@ pub async fn initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, @@ -101,8 +101,8 @@ async fn do_initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 0293e1da224a6323fed59f7f727b5d5263391bb8..a2379eb4812e2ab7d904eff0ce9b082ac474d36e 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -77,7 +77,7 @@ pub trait SubstrateFinalitySyncPipeline: BaseSubstrateFinalitySyncPipeline { /// Add relay guards if required. async fn start_relay_guards( - target_client: &Client, + target_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -240,8 +240,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { /// Run Substrate-to-Substrate finality sync loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, @@ -255,8 +255,8 @@ pub async fn run( ); finality_relay::run( - SubstrateFinalitySource::

<P>::new(source_client, None),
- SubstrateFinalityTarget::<P>
::new(target_client, transaction_params.clone()), + SubstrateFinalitySource::::new(source_client, None), + SubstrateFinalityTarget::::new(target_client, transaction_params.clone()), finality_relay::FinalitySyncParams { tick: std::cmp::max( P::SourceChain::AVERAGE_BLOCK_INTERVAL, @@ -279,12 +279,12 @@ pub async fn run( /// Relay single header. No checks are made to ensure that transaction will succeed. pub async fn relay_single_header( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, transaction_params: TransactionParams>, header_number: BlockNumberOf, ) -> anyhow::Result<()> { - let finality_source = SubstrateFinalitySource::

::new(source_client, None); + let finality_source = SubstrateFinalitySource::::new(source_client, None); let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; let Some(proof) = proof else { return Err(anyhow::format_err!( @@ -295,7 +295,7 @@ pub async fn relay_single_header( )); }; - let finality_target = SubstrateFinalityTarget::

::new(target_client, transaction_params); + let finality_target = SubstrateFinalityTarget::::new(target_client, transaction_params); let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; match tx_tracker.wait().await { TrackedTransactionStatus::Finalized(_) => Ok(()), diff --git a/bridges/relays/lib-substrate-relay/src/finality/source.rs b/bridges/relays/lib-substrate-relay/src/finality/source.rs index c94af6108957a0d2d4b0b4079220be9c11a5a470..f6fa5c24add5066ab0c062a6966844b48be17ce8 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/source.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/source.rs @@ -40,22 +40,24 @@ use relay_utils::{relay_loop::Client as RelayClient, UniqueSaturatedInto}; pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; /// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, +pub struct SubstrateFinalitySource { + client: SourceClnt, maximal_header_number: Option>, } -impl SubstrateFinalitySource

{ +impl> + SubstrateFinalitySource +{ /// Create new headers source using given client. pub fn new( - client: Client, + client: SourceClnt, maximal_header_number: Option>, ) -> Self { SubstrateFinalitySource { client, maximal_header_number } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceClnt { &self.client } @@ -174,7 +176,9 @@ impl SubstrateFinalitySource

{ } } -impl Clone for SubstrateFinalitySource

{ +impl Clone + for SubstrateFinalitySource +{ fn clone(&self) -> Self { SubstrateFinalitySource { client: self.client.clone(), @@ -184,7 +188,9 @@ impl Clone for SubstrateFinalitySource

{ } #[async_trait] -impl RelayClient for SubstrateFinalitySource

{ +impl> RelayClient + for SubstrateFinalitySource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -193,8 +199,8 @@ impl RelayClient for SubstrateFinalitySource

SourceClientBase> - for SubstrateFinalitySource

+impl> + SourceClientBase> for SubstrateFinalitySource { type FinalityProofsStream = SubstrateFinalityProofsStream

; @@ -204,8 +210,8 @@ impl SourceClientBase SourceClient> - for SubstrateFinalitySource

+impl> + SourceClient> for SubstrateFinalitySource { async fn best_finalized_block_number(&self) -> Result, Error> { let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; @@ -235,7 +241,7 @@ impl SourceClient( - client: &Client, + client: &impl Client, number: BlockNumberOf, ) -> Result< ( @@ -244,8 +250,8 @@ async fn header_and_finality_proof( ), Error, > { - let header_hash = client.block_hash_by_number(number).await?; - let signed_block = client.get_block(Some(header_hash)).await?; + let header_hash = client.header_hash_by_number(number).await?; + let signed_block = client.block_by_hash(header_hash).await?; let justification = signed_block .justification(P::FinalityEngine::ID) diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 52ab2462c62c4784b80bfbd128c11194a4f2edd4..18b696685dd4e7122a7ff6a9f7d7cb2afd333f5f 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -28,22 +28,25 @@ use async_trait::async_trait; use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, - TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, + TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; +use sp_core::Pair; use sp_runtime::traits::Header; /// Substrate client as Substrate finality target. -pub struct SubstrateFinalityTarget { - client: Client, +pub struct SubstrateFinalityTarget { + client: TargetClnt, transaction_params: TransactionParams>, } -impl SubstrateFinalityTarget

{ +impl> + SubstrateFinalityTarget +{ /// Create new Substrate headers target. pub fn new( - client: Client, + client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { SubstrateFinalityTarget { client, transaction_params } @@ -65,7 +68,9 @@ impl SubstrateFinalityTarget

{ } } -impl Clone for SubstrateFinalityTarget

{ +impl Clone + for SubstrateFinalityTarget +{ fn clone(&self) -> Self { SubstrateFinalityTarget { client: self.client.clone(), @@ -75,7 +80,9 @@ impl Clone for SubstrateFinalityTarget

{ } #[async_trait] -impl RelayClient for SubstrateFinalityTarget

{ +impl> RelayClient + for SubstrateFinalityTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -84,10 +91,12 @@ impl RelayClient for SubstrateFinalityTarget

TargetClient> - for SubstrateFinalityTarget

+impl> + TargetClient> for SubstrateFinalityTarget +where + AccountIdOf: From< as Pair>::Public>, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_finalized_source_block_id(&self) -> Result, Error> { // we can't continue to relay finality if target node is out of sync, because @@ -109,10 +118,10 @@ impl TargetClient Result>, Self::Error> { Ok(self .client - .typed_state_call( + .state_call( + self.client.best_header().await?.hash(), P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), - Some(self.client.best_header().await?.hash()), ) .await .unwrap_or_else(|e| { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs index 5a9ec42fde5a38450def1a62935852fe77801df5..4f15d68771940812d8a2f05c5656440100033dab 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs @@ -28,10 +28,11 @@ use bp_header_chain::{ }; use bp_runtime::{BasicOperatingMode, HeaderIdProvider, OperatingMode}; use codec::{Decode, Encode}; +use futures::stream::StreamExt; use num_traits::{One, Zero}; use relay_substrate_client::{ BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, HeaderOf, - Subscription, SubstrateFinalityClient, SubstrateGrandpaFinalityClient, + Subscription, }; use sp_consensus_grandpa::{AuthorityList as GrandpaAuthoritiesSet, GRANDPA_ENGINE_ID}; use sp_core::{storage::StorageKey, Bytes}; @@ -45,8 +46,6 @@ pub trait Engine: Send { const ID: ConsensusEngineId; /// A reader that can extract the consensus log from the header digest and interpret it. type ConsensusLogReader: ConsensusLogReader; - /// Type of Finality RPC client used by this engine. - type FinalityClient: SubstrateFinalityClient; /// Type of finality proofs, used by consensus engine. type FinalityProof: FinalityProof, BlockNumberOf> + Decode + Encode; /// The context needed for verifying finality proofs. @@ -74,10 +73,10 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain has already been initialized. async fn is_initialized( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .raw_storage_value(Self::is_initialized_key(), None) + .raw_storage_value(target_client.best_header_hash().await?, Self::is_initialized_key()) .await? .is_some()) } @@ -88,10 +87,13 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain is halted. async fn is_halted( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .storage_value::(Self::pallet_operating_mode_key(), None) + .storage_value::( + target_client.best_header_hash().await?, + Self::pallet_operating_mode_key(), + ) .await? .map(|operating_mode| operating_mode.is_halted()) .unwrap_or(false)) @@ -99,17 +101,15 @@ pub trait Engine: Send { /// A method to subscribe to encoded finality proofs, given source client. async fn source_finality_proofs( - source_client: &Client, - ) -> Result, SubstrateError> { - source_client.subscribe_finality_justifications::().await - } + source_client: &impl Client, + ) -> Result, SubstrateError>; /// Verify and optimize finality proof before sending it to the target node. /// /// Apart from optimization, we expect this method to perform all required checks /// that the `header` and `proof` are valid at the current state of the target chain. 
async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result; @@ -123,19 +123,19 @@ pub trait Engine: Send { /// Prepare initialization data for the finality bridge pallet. async fn prepare_initialization_data( - client: Client, + client: impl Client, ) -> Result, BlockNumberOf>>; /// Get the context needed for validating a finality proof. async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result; /// Returns the finality info associated to the source headers synced with the target /// at the provided block. async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result< Vec>, @@ -144,7 +144,7 @@ pub trait Engine: Send { /// Generate key ownership proof for the provided equivocation. async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result; @@ -156,7 +156,7 @@ pub struct Grandpa(PhantomData); impl Grandpa { /// Read header by hash from the source client. async fn source_header( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { source_client @@ -167,15 +167,15 @@ impl Grandpa { /// Read GRANDPA authorities set at given header. async fn source_authorities_set( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) + const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; + + source_client + .state_call(header_hash, SUB_API_GRANDPA_AUTHORITIES.to_string(), ()) .await - .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) + .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err)) } } @@ -183,7 +183,6 @@ impl Grandpa { impl Engine for Grandpa { const ID: ConsensusEngineId = GRANDPA_ENGINE_ID; type ConsensusLogReader = GrandpaConsensusLogReader<::Number>; - type FinalityClient = SubstrateGrandpaFinalityClient; type FinalityProof = GrandpaJustification>; type FinalityVerificationContext = JustificationVerificationContext; type EquivocationProof = sp_consensus_grandpa::EquivocationProof, BlockNumberOf>; @@ -200,8 +199,14 @@ impl Engine for Grandpa { bp_header_chain::storage_keys::pallet_operating_mode_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) } + async fn source_finality_proofs( + client: &impl Client, + ) -> Result, SubstrateError> { + client.subscribe_grandpa_finality_justifications().await + } + async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result { @@ -239,7 +244,7 @@ impl Engine for Grandpa { /// Prepare initialization data for the GRANDPA verifier pallet. 
async fn prepare_initialization_data( - source_client: Client, + source_client: impl Client, ) -> Result, BlockNumberOf>> { // In ideal world we just need to get best finalized header and then to read GRANDPA // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at @@ -248,17 +253,14 @@ impl Engine for Grandpa { // But now there are problems with this approach - `CurrentSetId` may return invalid value. // So here we're waiting for the next justification, read the authorities set and then try // to figure out the set id with bruteforce. - let justifications = Self::source_finality_proofs(&source_client) + let mut justifications = Self::source_finality_proofs(&source_client) .await .map_err(|err| Error::Subscribe(C::NAME, err))?; // Read next justification - the header that it finalizes will be used as initial header. let justification = justifications .next() .await - .map_err(|e| Error::ReadJustification(C::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(C::NAME)) - })?; + .ok_or(Error::ReadJustificationStreamEnded(C::NAME))?; // Read initial header. let justification: GrandpaJustification = @@ -359,14 +361,14 @@ impl Engine for Grandpa { } async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result { let current_authority_set_key = bp_header_chain::storage_keys::current_authority_set_key( C::WITH_CHAIN_GRANDPA_PALLET_NAME, ); let authority_set: AuthoritySet = target_client - .storage_value(current_authority_set_key, Some(at)) + .storage_value(at, current_authority_set_key) .await? .map(Ok) .unwrap_or(Err(SubstrateError::Custom(format!( @@ -385,11 +387,11 @@ impl Engine for Grandpa { } async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result>>, SubstrateError> { let stored_headers_grandpa_info: Vec>> = target_client - .typed_state_call(C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), (), Some(at)) + .state_call(at, C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), ()) .await?; let mut headers_grandpa_info = vec![]; @@ -407,7 +409,7 @@ impl Engine for Grandpa { } async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs index 825960b1b3ef2cc4f73b7565d6a2c8fe3e30fdd9..71d15ca3868e04da680f83387ac792b3a02a5f24 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs @@ -50,11 +50,11 @@ pub type SubstrateFinalityProofsStream

= /// Subscribe to new finality proofs. pub async fn finality_proofs( - client: &Client, + client: &impl Client, ) -> Result, Error> { Ok(unfold( P::FinalityEngine::source_finality_proofs(client).await?, - move |subscription| async move { + move |mut subscription| async move { loop { let log_error = |err| { log::error!( @@ -65,8 +65,7 @@ pub async fn finality_proofs( ); }; - let next_justification = - subscription.next().await.map_err(|err| log_error(err.to_string())).ok()??; + let next_justification = subscription.next().await?; let decoded_justification = >::FinalityProof::decode( @@ -93,7 +92,7 @@ pub async fn finality_proofs( /// /// The runtime API method should be `FinalityApi::best_finalized()`. pub async fn best_synced_header_id( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result>, Error> where @@ -102,6 +101,6 @@ where { // now let's read id of best finalized peer header at our best finalized block target_client - .typed_state_call(SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), Some(at)) + .state_call(at, SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), ()) .await } diff --git a/bridges/relays/lib-substrate-relay/src/lib.rs b/bridges/relays/lib-substrate-relay/src/lib.rs index b3e8e7ed9a2059bb07134640aa4e0bc98494a6a1..c004540a9f4951f3aeeb43a91c74da68a3ff1064 100644 --- a/bridges/relays/lib-substrate-relay/src/lib.rs +++ b/bridges/relays/lib-substrate-relay/src/lib.rs @@ -30,10 +30,7 @@ pub mod equivocation; pub mod error; pub mod finality; pub mod finality_base; -pub mod messages_lane; -pub mod messages_metrics; -pub mod messages_source; -pub mod messages_target; +pub mod messages; pub mod on_demand; pub mod parachains; @@ -130,3 +127,17 @@ impl BatchCallBuilder for () { unreachable!("never called, because ()::new_builder() returns None; qed") } } + +/// Module for handling storage proofs compatibility. +pub mod proofs { + use bp_runtime::{HashOf, RawStorageProof}; + use relay_substrate_client::Chain; + use sp_trie::StorageProof; + + /// Converts proof to `RawStorageProof` type. + pub fn to_raw_storage_proof( + proof: (StorageProof, HashOf), + ) -> RawStorageProof { + proof.0.into_iter_nodes().collect() + } +} diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs similarity index 99% rename from bridges/relays/lib-substrate-relay/src/messages_metrics.rs rename to bridges/relays/lib-substrate-relay/src/messages/metrics.rs index b30e75bd8bacbbd25c056eb7d499cc18d040f991..8845f43dcb62aa778b50354141381290fc0b897d 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs @@ -36,7 +36,7 @@ use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( - client: Client, + client: impl Client, metrics: &MetricsParams, relay_accounts: &Vec>>, lanes: &[LaneId], diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages/mod.rs similarity index 59% rename from bridges/relays/lib-substrate-relay/src/messages_lane.rs rename to bridges/relays/lib-substrate-relay/src/messages/mod.rs index 08550d19bae03aaf955c81800267cd80f9ce0f20..e52b7020666941c9a8d9937655496acf451c5379 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/mod.rs @@ -17,20 +17,21 @@ //! 
Tools for supporting message lanes between two Substrate-based chains. use crate::{ - messages_source::{SubstrateMessagesProof, SubstrateMessagesSource}, - messages_target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, + messages::{ + source::{SubstrateMessagesProof, SubstrateMessagesSource}, + target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, + }, on_demand::OnDemandRelay, BatchCallBuilder, BatchCallBuilderConstructor, TransactionParams, }; use async_std::sync::Arc; -use bp_messages::{ChainWithMessages as _, LaneId, MessageNonce}; +use bp_messages::{ + target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, LaneId, MessageNonce, +}; use bp_runtime::{ AccountIdOf, Chain as _, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps, }; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; use codec::Encode; use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransaction}; @@ -48,6 +49,10 @@ use sp_core::Pair; use sp_runtime::traits::Zero; use std::{fmt::Debug, marker::PhantomData, ops::RangeInclusive}; +pub mod metrics; +pub mod source; +pub mod target; + /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { /// Messages of this chain are relayed to the `TargetChain`. @@ -88,13 +93,13 @@ impl MessageLane for MessageLaneAdapter

{ } /// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { +pub struct MessagesRelayParams { /// Messages source client. - pub source_client: Client, + pub source_client: SourceClnt, /// Source transaction params. pub source_transaction_params: TransactionParams>, /// Messages target client. - pub target_client: Client, + pub target_client: TargetClnt, /// Target transaction params. pub target_transaction_params: TransactionParams>, /// Optional on-demand source to target headers relay. @@ -179,8 +184,13 @@ impl>> } /// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams

) -> anyhow::Result<()> +pub async fn run( + params: MessagesRelayParams, +) -> anyhow::Result<()> where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, @@ -190,7 +200,7 @@ where let limits = match params.limits { Some(limits) => limits, None => - select_delivery_transaction_limits_rpc::

<P>( + select_delivery_transaction_limits_rpc( &params, P::TargetChain::max_extrinsic_weight(), P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, @@ -250,14 +260,14 @@ where max_messages_size_in_single_batch, }, },
- SubstrateMessagesSource::<P>::new(
+ SubstrateMessagesSource::<P, _, _>::new( source_client.clone(), target_client.clone(), params.lane_id, params.source_transaction_params, params.target_to_source_headers_relay, ),
- SubstrateMessagesTarget::<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, params.lane_id, @@ -278,8 +288,8 @@ where /// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that transaction /// will succeed. pub async fn relay_messages_range( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, target_transaction_params: TransactionParams>, at_source_block: HeaderIdOf, @@ -295,14 +305,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_range( - SubstrateMessagesSource::

<P>::new(
+ SubstrateMessagesSource::<P, _, _>::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ),
- SubstrateMessagesTarget::<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -321,8 +331,8 @@ where /// Relay messages delivery confirmation of Substrate-to-Substrate messages. /// No checks are made to ensure that transaction will succeed. pub async fn relay_messages_delivery_confirmation( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, at_target_block: HeaderIdOf, lane_id: LaneId, @@ -335,14 +345,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_delivery_confirmation( - SubstrateMessagesSource::

<P>::new(
+ SubstrateMessagesSource::<P, _, _>::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ),
- SubstrateMessagesTarget::<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -378,11 +388,10 @@ pub struct DirectReceiveMessagesProofCallBuilder { impl ReceiveMessagesProofCallBuilder

for DirectReceiveMessagesProofCallBuilder where P: SubstrateMessageLane, - R: BridgeMessagesConfig>, + R: BridgeMessagesConfig, I: 'static, - R::SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>, - >, + R::BridgedChain: + bp_runtime::Chain, Hash = HashOf>, CallOf: From> + GetDispatchInfo, { fn build_receive_messages_proof_call( @@ -394,7 +403,7 @@ where ) -> CallOf { let call: CallOf = BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, + proof: proof.1.into(), messages_count, dispatch_weight, } @@ -427,26 +436,26 @@ macro_rules! generate_receive_message_proof_call_builder { ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_proof:path) => { pub struct $mocked_builder; - impl $crate::messages_lane::ReceiveMessagesProofCallBuilder<$pipeline> + impl $crate::messages::ReceiveMessagesProofCallBuilder<$pipeline> for $mocked_builder { fn build_receive_messages_proof_call( relayer_id_at_source: relay_substrate_client::AccountIdOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain >, - proof: $crate::messages_source::SubstrateMessagesProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + proof: $crate::messages::source::SubstrateMessagesProof< + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain >, messages_count: u32, dispatch_weight: bp_messages::Weight, _trace_call: bool, ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain + <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain > { bp_runtime::paste::item! { $bridge_messages($receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, + proof: proof.1.into(), messages_count: messages_count, dispatch_weight: dispatch_weight, }) @@ -478,11 +487,7 @@ where P: SubstrateMessageLane, R: BridgeMessagesConfig, I: 'static, - R::TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - R::OutboundPayload, - R::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>, - >, + R::BridgedChain: bp_runtime::Chain>, CallOf: From> + GetDispatchInfo, { fn build_receive_messages_delivery_proof_call( @@ -491,7 +496,7 @@ where ) -> CallOf { let call: CallOf = BridgeMessagesCall::::receive_messages_delivery_proof { - proof: proof.1, + proof: proof.1.into(), relayers_state: proof.0, } .into(); @@ -523,16 +528,16 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_delivery_proof:path) => { pub struct $mocked_builder; - impl $crate::messages_lane::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> + impl $crate::messages::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> for $mocked_builder { fn build_receive_messages_delivery_proof_call( - proof: $crate::messages_target::SubstrateMessagesDeliveryProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain + proof: $crate::messages::target::SubstrateMessagesDeliveryProof< + <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain >, _trace_call: bool, ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain > { bp_runtime::paste::item! 
{ $bridge_messages($receive_messages_delivery_proof { @@ -546,12 +551,15 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { } /// Returns maximal number of messages and their maximal cumulative dispatch weight. -async fn select_delivery_transaction_limits_rpc( - params: &MessagesRelayParams

, +async fn select_delivery_transaction_limits_rpc( + params: &MessagesRelayParams, max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, ) -> anyhow::Result where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, { // We may try to guess accurate value, based on maximal number of messages and per-message @@ -567,20 +575,21 @@ where let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; // weight of empty message delivery with outbound lane state - let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::

(params, 0)?; + let best_target_block_hash = params.target_client.best_header_hash().await?; + let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::(params, 0)?; let delivery_tx_with_zero_messages_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_zero_messages) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_zero_messages) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) })?; // weight of single message delivery with outbound lane state - let delivery_tx_with_one_message = dummy_messages_delivery_transaction::

(params, 1)?; + let delivery_tx_with_one_message = dummy_messages_delivery_transaction::(params, 1)?; let delivery_tx_with_one_message_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_one_message) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_one_message) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) @@ -615,8 +624,8 @@ where } /// Returns dummy message delivery transaction with zero messages and `1kb` proof. -fn dummy_messages_delivery_transaction( - params: &MessagesRelayParams

, +fn dummy_messages_delivery_transaction( + params: &MessagesRelayParams, messages: u32, ) -> anyhow::Result<::SignedTransaction> where @@ -634,13 +643,7 @@ where Weight::zero(), FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - // we may use per-chain `EXTRA_STORAGE_PROOF_SIZE`, but since we don't need - // exact values, this global estimation is fine - storage_proof: vec![vec![ - 42u8; - pallet_bridge_messages::EXTRA_STORAGE_PROOF_SIZE - as usize - ]], + storage_proof: Default::default(), lane: Default::default(), nonces_start: 1, nonces_end: messages as u64, @@ -666,3 +669,362 @@ where ) .map_err(Into::into) } + +#[cfg(test)] +mod tests { + use super::*; + use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, UnrewardedRelayersState, + }; + use relay_substrate_client::calls::{UtilityCall as MockUtilityCall, UtilityCall}; + + #[derive(codec::Decode, codec::Encode, Clone, Debug, PartialEq)] + pub enum RuntimeCall { + #[codec(index = 53)] + BridgeMessages(CodegenBridgeMessagesCall), + #[codec(index = 123)] + Utility(UtilityCall), + } + pub type CodegenBridgeMessagesCall = bp_messages::BridgeMessagesCall< + u64, + Box>, + FromBridgedChainMessagesDeliveryProof, + >; + + impl From> for RuntimeCall { + fn from(value: MockUtilityCall) -> RuntimeCall { + match value { + MockUtilityCall::batch_all(calls) => + RuntimeCall::Utility(UtilityCall::::batch_all(calls)), + } + } + } + + #[test] + fn ensure_macro_compatibility_for_generate_receive_message_proof_call_builder() { + // data + let receive_messages_proof = FromBridgedChainMessagesProof { + bridged_header_hash: Default::default(), + storage_proof: Default::default(), + lane: LaneId([0, 0, 0, 0]), + nonces_start: 0, + nonces_end: 0, + }; + let account = 1234; + let messages_count = 0; + let dispatch_weight = Default::default(); + + // construct pallet Call directly + let pallet_receive_messages_proof = + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: account, + proof: receive_messages_proof.clone().into(), + messages_count, + dispatch_weight, + }; + + // construct mock enum Call + let mock_enum_receive_messages_proof = CodegenBridgeMessagesCall::receive_messages_proof { + relayer_id_at_bridged_chain: account, + proof: receive_messages_proof.clone().into(), + messages_count, + dispatch_weight, + }; + + // now we should be able to use macro `generate_receive_message_proof_call_builder` + let relayer_call_builder_receive_messages_proof = relayer::ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( + account, + (Default::default(), receive_messages_proof), + messages_count, + dispatch_weight, + false, + ); + + // ensure they are all equal + assert_eq!( + pallet_receive_messages_proof.encode(), + mock_enum_receive_messages_proof.encode() + ); + match relayer_call_builder_receive_messages_proof { + RuntimeCall::BridgeMessages(call) => match call { + call @ CodegenBridgeMessagesCall::receive_messages_proof { .. 
} => + assert_eq!(pallet_receive_messages_proof.encode(), call.encode()), + _ => panic!("Unexpected CodegenBridgeMessagesCall type"), + }, + _ => panic!("Unexpected RuntimeCall type"), + }; + } + + #[test] + fn ensure_macro_compatibility_for_generate_receive_message_delivery_proof_call_builder() { + // data + let receive_messages_delivery_proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: Default::default(), + storage_proof: Default::default(), + lane: LaneId([0, 0, 0, 0]), + }; + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 0, + messages_in_oldest_entry: 0, + total_messages: 0, + last_delivered_nonce: 0, + }; + + // construct pallet Call directly + let pallet_receive_messages_delivery_proof = + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: receive_messages_delivery_proof.clone(), + relayers_state: relayers_state.clone(), + }; + + // construct mock enum Call + let mock_enum_receive_messages_delivery_proof = + CodegenBridgeMessagesCall::receive_messages_delivery_proof { + proof: receive_messages_delivery_proof.clone(), + relayers_state: relayers_state.clone(), + }; + + // now we should be able to use macro `generate_receive_message_proof_call_builder` + let relayer_call_builder_receive_messages_delivery_proof = relayer::ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder::build_receive_messages_delivery_proof_call( + (relayers_state, receive_messages_delivery_proof), + false, + ); + + // ensure they are all equal + assert_eq!( + pallet_receive_messages_delivery_proof.encode(), + mock_enum_receive_messages_delivery_proof.encode() + ); + match relayer_call_builder_receive_messages_delivery_proof { + RuntimeCall::BridgeMessages(call) => match call { + call @ CodegenBridgeMessagesCall::receive_messages_delivery_proof { .. } => + assert_eq!(pallet_receive_messages_delivery_proof.encode(), call.encode()), + _ => panic!("Unexpected CodegenBridgeMessagesCall type"), + }, + _ => panic!("Unexpected RuntimeCall type"), + }; + } + + // mock runtime with `pallet_bridge_messages` + mod mock { + use super::super::*; + use bp_messages::target_chain::ForbidInboundMessages; + use bp_runtime::ChainId; + use frame_support::derive_impl; + use sp_core::H256; + use sp_runtime::{ + generic, testing::Header as SubstrateHeader, traits::BlakeTwo256, StateVersion, + }; + + type Block = frame_system::mocking::MockBlock; + pub type SignedBlock = generic::SignedBlock; + + frame_support::construct_runtime! 
{ + pub enum TestRuntime + { + System: frame_system, + Messages: pallet_bridge_messages, + } + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for TestRuntime { + type Block = Block; + } + + impl pallet_bridge_messages::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgedHeaderChain; + type ActiveOutboundLanes = (); + type OutboundPayload = Vec; + type InboundPayload = Vec; + type DeliveryPayments = (); + type DeliveryConfirmationPayments = (); + type OnMessagesDelivered = (); + type MessageDispatch = ForbidInboundMessages>; + } + + pub struct ThisUnderlyingChain; + + impl bp_runtime::Chain for ThisUnderlyingChain { + const ID: ChainId = *b"tuch"; + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = SubstrateHeader; + type AccountId = u64; + type Balance = u64; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { + u32::MAX + } + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } + } + + impl bp_messages::ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; + } + + pub struct BridgedUnderlyingChain; + + pub type BridgedHeaderHash = H256; + pub type BridgedChainHeader = SubstrateHeader; + + impl bp_runtime::Chain for BridgedUnderlyingChain { + const ID: ChainId = *b"bgdc"; + type BlockNumber = u64; + type Hash = BridgedHeaderHash; + type Hasher = BlakeTwo256; + type Header = BridgedChainHeader; + type AccountId = u64; + type Balance = u64; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { + 4096 + } + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } + } + + impl bp_messages::ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; + } + + pub struct BridgedHeaderChain; + + impl bp_header_chain::HeaderChain for BridgedHeaderChain { + fn finalized_header_state_root( + _hash: HashOf, + ) -> Option> { + unreachable!() + } + } + } + + // relayer configuration + mod relayer { + use super::*; + use crate::{ + messages::{ + tests::{mock, RuntimeCall}, + SubstrateMessageLane, + }, + UtilityPalletBatchCallBuilder, + }; + use bp_runtime::UnderlyingChainProvider; + use relay_substrate_client::{MockedRuntimeUtilityPallet, SignParam, UnsignedTransaction}; + use std::time::Duration; + + #[derive(Clone)] + pub struct ThisChain; + impl UnderlyingChainProvider for ThisChain { + type Chain = mock::ThisUnderlyingChain; + } + impl relay_substrate_client::Chain for ThisChain { + const NAME: &'static str = ""; + const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = ""; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = ""; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); + type SignedBlock = mock::SignedBlock; + type Call = RuntimeCall; + } + impl relay_substrate_client::ChainWithTransactions for ThisChain { + type AccountKeyPair = 
sp_core::sr25519::Pair; + type SignedTransaction = (); + + fn sign_transaction( + _: SignParam, + _: UnsignedTransaction, + ) -> Result + where + Self: Sized, + { + todo!() + } + } + impl relay_substrate_client::ChainWithMessages for ThisChain { + const WITH_CHAIN_RELAYERS_PALLET_NAME: Option<&'static str> = None; + const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + } + impl relay_substrate_client::ChainWithUtilityPallet for ThisChain { + type UtilityPallet = MockedRuntimeUtilityPallet; + } + + #[derive(Clone)] + pub struct BridgedChain; + impl UnderlyingChainProvider for BridgedChain { + type Chain = mock::BridgedUnderlyingChain; + } + impl relay_substrate_client::Chain for BridgedChain { + const NAME: &'static str = ""; + const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = ""; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = ""; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); + type SignedBlock = mock::SignedBlock; + type Call = RuntimeCall; + } + impl relay_substrate_client::ChainWithTransactions for BridgedChain { + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = (); + + fn sign_transaction( + _: SignParam, + _: UnsignedTransaction, + ) -> Result + where + Self: Sized, + { + todo!() + } + } + impl relay_substrate_client::ChainWithMessages for BridgedChain { + const WITH_CHAIN_RELAYERS_PALLET_NAME: Option<&'static str> = None; + const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + } + impl relay_substrate_client::ChainWithUtilityPallet for BridgedChain { + type UtilityPallet = MockedRuntimeUtilityPallet; + } + + #[derive(Clone, Debug)] + pub struct ThisChainToBridgedChainMessageLane; + impl SubstrateMessageLane for ThisChainToBridgedChainMessageLane { + type SourceChain = ThisChain; + type TargetChain = BridgedChain; + type ReceiveMessagesProofCallBuilder = + ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder; + type ReceiveMessagesDeliveryProofCallBuilder = + ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder; + type SourceBatchCallBuilder = UtilityPalletBatchCallBuilder; + type TargetBatchCallBuilder = UtilityPalletBatchCallBuilder; + } + + generate_receive_message_proof_call_builder!( + ThisChainToBridgedChainMessageLane, + ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder, + RuntimeCall::BridgeMessages, + CodegenBridgeMessagesCall::receive_messages_proof + ); + generate_receive_message_delivery_proof_call_builder!( + ThisChainToBridgedChainMessageLane, + ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder, + RuntimeCall::BridgeMessages, + CodegenBridgeMessagesCall::receive_messages_delivery_proof + ); + } +} diff --git a/bridges/relays/lib-substrate-relay/src/messages_source.rs b/bridges/relays/lib-substrate-relay/src/messages/source.rs similarity index 86% rename from bridges/relays/lib-substrate-relay/src/messages_source.rs rename to bridges/relays/lib-substrate-relay/src/messages/source.rs index 49deff046f9ca0914846d19b18653f85b8cd8554..b75fc86d5eee20d247de3cbc26f324ed46e9ad2d 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_source.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/source.rs @@ -20,11 +20,12 @@ use crate::{ finality_base::best_synced_header_id, - messages_lane::{ + messages::{ BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesDeliveryProofCallBuilder, SubstrateMessageLane, 
}, on_demand::OnDemandRelay, + proofs::to_raw_storage_proof, TransactionParams, }; @@ -32,11 +33,11 @@ use async_std::sync::Arc; use async_trait::async_trait; use bp_messages::{ storage_keys::{operating_mode_key, outbound_lane_data_key}, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, InboundMessageDetails, LaneId, MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, }; -use bp_runtime::{BasicOperatingMode, HeaderIdProvider}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use bp_runtime::{BasicOperatingMode, HeaderIdProvider, RangeInclusiveExt}; use codec::Encode; use frame_support::weights::Weight; use messages_relay::{ @@ -63,19 +64,21 @@ pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, +pub struct SubstrateMessagesSource { + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option>>, } -impl SubstrateMessagesSource
{ +impl, TargetClnt> + SubstrateMessagesSource +{ /// Create new Substrate headers source. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option< @@ -98,22 +101,25 @@ impl SubstrateMessagesSource
{ ) -> Result, SubstrateError> { self.source_client .storage_value( + id.hash(), outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at source chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await + ensure_messages_pallet_active::(&self.source_client) + .await } } -impl Clone for SubstrateMessagesSource
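// Illustrative sketch (hypothetical toy types, not the real relay-substrate-client
// API): the storage reads above now pass a mandatory block hash as the first
// argument, so `storage_value(key, Some(id.1))` becomes `storage_value(id.hash(), key)`
// and every read is pinned to an explicit block instead of defaulting to "best".

use std::collections::HashMap;

type Hash = u64;
type StorageKey = &'static str;
type StorageValue = u64;

trait StateReader {
    // Block context is explicit and required; there is no `Option<Hash>` fallback.
    fn storage_value(&self, at: Hash, key: StorageKey) -> Option<StorageValue>;
}

struct MockChain {
    // (block hash, storage key) -> stored value
    state: HashMap<(Hash, StorageKey), StorageValue>,
}

impl StateReader for MockChain {
    fn storage_value(&self, at: Hash, key: StorageKey) -> Option<StorageValue> {
        self.state.get(&(at, key)).copied()
    }
}

fn main() {
    let chain = MockChain { state: HashMap::from([((0xaa, "outbound_lane_data"), 7)]) };
    assert_eq!(chain.storage_value(0xaa, "outbound_lane_data"), Some(7));
    // A different block hash sees different (here: absent) state.
    assert_eq!(chain.storage_value(0xbb, "outbound_lane_data"), None);
}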
{ +impl Clone + for SubstrateMessagesSource +{ fn clone(&self) -> Self { Self { source_client: self.source_client.clone(), @@ -126,7 +132,12 @@ impl Clone for SubstrateMessagesSource
{ } #[async_trait] -impl RelayClient for SubstrateMessagesSource
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -150,13 +161,17 @@ impl RelayClient for SubstrateMessagesSource
{ } #[async_trait] -impl SourceClient> for SubstrateMessagesSource
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > SourceClient> for SubstrateMessagesSource where AccountIdOf: From< as Pair>::Public>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -169,7 +184,7 @@ where // we can't relay confirmations if messages pallet at source chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.source_client, Some(&self.target_client)).await + read_client_state_from_both_chains(&self.source_client, &self.target_client).await } async fn latest_generated_nonce( @@ -203,12 +218,12 @@ where id: SourceHeaderIdOf>, nonces: RangeInclusive, ) -> Result>, SubstrateError> { - let mut out_msgs_details = self + let mut out_msgs_details: Vec<_> = self .source_client - .typed_state_call::<_, Vec<_>>( + .state_call::<_, Vec<_>>( + id.hash(), P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, *nonces.start(), *nonces.end()), - Some(id.1), ) .await?; validate_out_msgs_details::(&out_msgs_details, nonces)?; @@ -226,7 +241,7 @@ where out_msg_details.nonce, ); let msg_payload: MessagePayload = - self.source_client.storage_value(msg_key, Some(id.1)).await?.ok_or_else(|| { + self.source_client.storage_value(id.hash(), msg_key).await?.ok_or_else(|| { SubstrateError::Custom(format!( "Message to {} {:?}/{} is missing from runtime the storage of {} at {:?}", P::TargetChain::NAME, @@ -240,15 +255,16 @@ where msgs_to_refine.push((msg_payload, out_msg_details)); } + let best_target_header_hash = self.target_client.best_header_hash().await?; for mut msgs_to_refine_batch in split_msgs_to_refine::(self.lane_id, msgs_to_refine)? { let in_msgs_details = self .target_client - .typed_state_call::<_, Vec>( + .state_call::<_, Vec>( + best_target_header_hash, P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, &msgs_to_refine_batch), - None, ) .await?; if in_msgs_details.len() != msgs_to_refine_batch.len() { @@ -305,34 +321,27 @@ where ), SubstrateError, > { - let mut storage_keys = - Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); - let mut message_nonce = *nonces.start(); - while message_nonce <= *nonces.end() { + let mut storage_keys = Vec::with_capacity(nonces.saturating_len() as usize); + for message_nonce in nonces.clone() { let message_key = bp_messages::storage_keys::message_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, message_nonce, ); storage_keys.push(message_key); - message_nonce += 1; } if proof_parameters.outbound_state_proof_required { - storage_keys.push(bp_messages::storage_keys::outbound_lane_data_key( + storage_keys.push(outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, )); } - let proof = self - .source_client - .prove_storage(storage_keys, id.1) - .await? - .into_iter_nodes() - .collect(); + let storage_proof = + self.source_client.prove_storage(id.hash(), storage_keys.clone()).await?; let proof = FromBridgedChainMessagesProof { bridged_header_hash: id.1, - storage_proof: proof, + storage_proof: to_raw_storage_proof::(storage_proof), lane: self.lane_id, nonces_start: *nonces.start(), nonces_end: *nonces.end(), @@ -387,15 +396,19 @@ where } /// Ensure that the messages pallet at source chain is active. 
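// Illustrative sketch (hypothetical helpers, not the real bp_messages storage-key
// functions): the proof-building loop above collects one storage key per message
// nonce in the inclusive range, plus an optional key for the outbound lane state
// when `outbound_state_proof_required` is set.

fn message_key(pallet: &str, lane: u32, nonce: u64) -> Vec<u8> {
    // Stand-in for `bp_messages::storage_keys::message_key`, which derives the real
    // storage key from the pallet name, lane id and nonce.
    format!("{pallet}/{lane}/msg/{nonce}").into_bytes()
}

fn outbound_lane_data_key(pallet: &str, lane: u32) -> Vec<u8> {
    // Stand-in for `bp_messages::storage_keys::outbound_lane_data_key`.
    format!("{pallet}/{lane}/outbound-lane-data").into_bytes()
}

fn collect_proof_keys(
    pallet: &str,
    lane: u32,
    nonces: std::ops::RangeInclusive<u64>,
    outbound_state_proof_required: bool,
) -> Vec<Vec<u8>> {
    // Capacity: one slot per nonce in the inclusive range, plus one for the
    // optional lane-state key.
    let mut keys =
        Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 2);
    for nonce in nonces {
        keys.push(message_key(pallet, lane, nonce));
    }
    if outbound_state_proof_required {
        keys.push(outbound_lane_data_key(pallet, lane));
    }
    keys
}

fn main() {
    let keys = collect_proof_keys("BridgeMessages", 0, 1..=3, true);
    assert_eq!(keys.len(), 4);
}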
-pub(crate) async fn ensure_messages_pallet_active( - client: &Client, +pub(crate) async fn ensure_messages_pallet_active( + client: &AtChainClient, ) -> Result<(), SubstrateError> where AtChain: ChainWithMessages, WithChain: ChainWithMessages, + AtChainClient: Client, { let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) + .storage_value( + client.best_header_hash().await?, + operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), + ) .await?; let is_halted = operating_mode == Some(MessagesOperatingMode::Basic(BasicOperatingMode::Halted)); @@ -412,11 +425,10 @@ where /// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` /// runtime API to read the best finalized Bridged chain header. /// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. +/// The value of `actual_best_finalized_peer_at_best_self` will always match +/// the `best_finalized_peer_at_best_self`. pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, + self_client: &impl Client, ) -> Result, HeaderIdOf>, SubstrateError> where SelfChain: Chain, @@ -431,30 +443,42 @@ where let peer_on_self_best_finalized_id = best_synced_header_id::(self_client, self_best_id.hash()).await?; - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = - match (peer_client, peer_on_self_best_finalized_id.as_ref()) { - (Some(peer_client), Some(peer_on_self_best_finalized_id)) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; - Some(actual_peer_on_self_best_finalized.id()) - }, - _ => peer_on_self_best_finalized_id, - }; - Ok(ClientState { best_self: self_best_id, best_finalized_self: self_best_finalized_id, best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, + actual_best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, }) } +/// Does the same stuff as `read_client_state`, but properly fills the +/// `actual_best_finalized_peer_at_best_self` field of the result. +pub async fn read_client_state_from_both_chains( + self_client: &impl Client, + peer_client: &impl Client, +) -> Result, HeaderIdOf>, SubstrateError> +where + SelfChain: Chain, + PeerChain: Chain, +{ + let mut client_state = read_client_state::(self_client).await?; + client_state.actual_best_finalized_peer_at_best_self = + match client_state.best_finalized_peer_at_best_self.as_ref() { + Some(peer_on_self_best_finalized_id) => { + let actual_peer_on_self_best_finalized = + peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; + Some(actual_peer_on_self_best_finalized.id()) + }, + _ => client_state.best_finalized_peer_at_best_self, + }; + Ok(client_state) +} + /// Reads best `PeerChain` header known to the `SelfChain` using provided runtime API method. /// /// Method is supposed to be the `FinalityApi::best_finalized()` method. 
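// Minimal, self-contained sketch of the split introduced above, using hypothetical
// toy types in place of the real `Client`/`ClientState` machinery: `read_client_state`
// now fills `actual_best_finalized_peer_at_best_self` with the on-chain (synced)
// value, while `read_client_state_from_both_chains` refines that field by asking the
// peer node which header it actually has at that block number.

#[derive(Clone, Debug, PartialEq)]
struct HeaderId {
    number: u64,
    hash: u64,
}

#[derive(Debug)]
struct ClientState {
    best_self: HeaderId,
    best_finalized_peer_at_best_self: Option<HeaderId>,
    actual_best_finalized_peer_at_best_self: Option<HeaderId>,
}

// Hypothetical stand-ins for the "self" chain client and the peer chain client.
trait SelfClient {
    fn best_header(&self) -> HeaderId;
    fn best_synced_peer_header(&self) -> Option<HeaderId>;
}

trait PeerClient {
    fn header_by_number(&self, number: u64) -> HeaderId;
}

// Single-client read: the "actual" field simply repeats the synced value.
fn read_client_state(self_client: &impl SelfClient) -> ClientState {
    let synced_peer = self_client.best_synced_peer_header();
    ClientState {
        best_self: self_client.best_header(),
        best_finalized_peer_at_best_self: synced_peer.clone(),
        actual_best_finalized_peer_at_best_self: synced_peer,
    }
}

// Two-client read: refine the "actual" field with what the peer chain reports.
fn read_client_state_from_both_chains(
    self_client: &impl SelfClient,
    peer_client: &impl PeerClient,
) -> ClientState {
    let mut state = read_client_state(self_client);
    state.actual_best_finalized_peer_at_best_self = state
        .best_finalized_peer_at_best_self
        .as_ref()
        .map(|id| peer_client.header_by_number(id.number));
    state
}

fn main() {
    struct A;
    impl SelfClient for A {
        fn best_header(&self) -> HeaderId {
            HeaderId { number: 100, hash: 0xaa }
        }
        fn best_synced_peer_header(&self) -> Option<HeaderId> {
            Some(HeaderId { number: 40, hash: 0x01 })
        }
    }
    struct B;
    impl PeerClient for B {
        fn header_by_number(&self, number: u64) -> HeaderId {
            HeaderId { number, hash: 0x02 }
        }
    }
    println!("{:?}", read_client_state_from_both_chains(&A, &B));
}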
pub async fn best_finalized_peer_header_at_self( - self_client: &Client, + self_client: &impl Client, at_self_hash: HashOf, ) -> Result>, SubstrateError> where @@ -463,10 +487,10 @@ where { // now let's read id of best finalized peer header at our best finalized block self_client - .typed_state_call::<_, Option<_>>( + .state_call::<_, Option<_>>( + at_self_hash, PeerChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_self_hash), ) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages/target.rs similarity index 84% rename from bridges/relays/lib-substrate-relay/src/messages_target.rs rename to bridges/relays/lib-substrate-relay/src/messages/target.rs index 5ffb2b6c771e0fec2bf44640993abba3706cb0a5..a6bf169cffb67ae149d9d4c8c5a2348dc18f5b39 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/target.rs @@ -19,22 +19,25 @@ //! `` chain. use crate::{ - messages_lane::{ + messages::{ + source::{ + ensure_messages_pallet_active, read_client_state_from_both_chains, + SubstrateMessagesProof, + }, BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesProofCallBuilder, SubstrateMessageLane, }, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, on_demand::OnDemandRelay, + proofs::to_raw_storage_proof, TransactionParams, }; use async_std::sync::Arc; use async_trait::async_trait; use bp_messages::{ - storage_keys::inbound_lane_data_key, ChainWithMessages as _, InboundLaneData, LaneId, - MessageNonce, UnrewardedRelayersState, + source_chain::FromBridgedChainMessagesDeliveryProof, storage_keys::inbound_lane_data_key, + ChainWithMessages as _, InboundLaneData, LaneId, MessageNonce, UnrewardedRelayersState, }; -use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; use messages_relay::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, @@ -45,27 +48,31 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::ops::RangeInclusive; +use std::{convert::TryFrom, ops::RangeInclusive}; /// Message receiving proof returned by the target Substrate node. pub type SubstrateMessagesDeliveryProof = (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); /// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, +pub struct SubstrateMessagesTarget { + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, source_to_target_headers_relay: Option>>, } -impl SubstrateMessagesTarget
{ +impl SubstrateMessagesTarget +where + P: SubstrateMessageLane, + TargetClnt: Client, +{ /// Create new Substrate headers target. pub fn new( - target_client: Client, - source_client: Client, + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, @@ -90,22 +97,25 @@ impl SubstrateMessagesTarget
{ ) -> Result>>, SubstrateError> { self.target_client .storage_value( + id.hash(), inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at target chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await + ensure_messages_pallet_active::(&self.target_client) + .await } } -impl Clone for SubstrateMessagesTarget
{ +impl Clone + for SubstrateMessagesTarget +{ fn clone(&self) -> Self { Self { target_client: self.target_client.clone(), @@ -119,7 +129,12 @@ impl Clone for SubstrateMessagesTarget
{ } #[async_trait] -impl RelayClient for SubstrateMessagesTarget
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -143,14 +158,18 @@ impl RelayClient for SubstrateMessagesTarget
{ } #[async_trait] -impl TargetClient> for SubstrateMessagesTarget
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > TargetClient> for SubstrateMessagesTarget where AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -163,7 +182,7 @@ where // we can't relay messages if messages pallet at target chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.target_client, Some(&self.source_client)).await + read_client_state_from_both_chains(&self.target_client, &self.source_client).await } async fn latest_received_nonce( @@ -213,19 +232,16 @@ where SubstrateError, > { let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = bp_messages::storage_keys::inbound_lane_data_key( + let storage_keys = vec![inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, - ); - let proof = self - .target_client - .prove_storage(vec![inbound_data_key], id.1) - .await? - .into_iter_nodes() - .collect(); + )]; + + let storage_proof = + self.target_client.prove_storage(id.hash(), storage_keys.clone()).await?; let proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: id.1, - storage_proof: proof, + storage_proof: to_raw_storage_proof::(storage_proof), lane: self.lane_id, }; Ok((id, (relayers_state, proof))) diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 202f53ea4e4f50510f125f28da86de878125d581..d18c582dfac4340a72f1bebd2eea3cabd0563dbe 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -53,25 +53,30 @@ use crate::{ /// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops /// syncing headers. #[derive(Clone)] -pub struct OnDemandHeadersRelay { +pub struct OnDemandHeadersRelay { /// Relay task name. relay_task_name: String, /// Shared reference to maximal required finalized header number. required_header_number: RequiredHeaderNumberRef, /// Client of the source chain. - source_client: Client, + source_client: SourceClnt, /// Client of the target chain. - target_client: Client, + target_client: TargetClnt, } -impl OnDemandHeadersRelay
{ +impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandHeadersRelay +{ /// Create new on-demand headers relay. /// /// If `metrics_params` is `Some(_)`, the metrics of the finality relay are registered. /// Otherwise, all required metrics must be exposed outside of this method. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, metrics_params: Option, @@ -104,8 +109,12 @@ impl OnDemandHeadersRelay
{ } #[async_trait] -impl OnDemandRelay - for OnDemandHeadersRelay
+impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandRelay + for OnDemandHeadersRelay { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -139,7 +148,7 @@ impl OnDemandRelay::new(self.source_client.clone(), None); + SubstrateFinalitySource::::new(self.source_client.clone(), None); let (header, mut proof) = finality_source.prove_block_finality(current_required_header).await?; let header_id = header.id(); @@ -198,8 +207,8 @@ impl OnDemandRelay( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, @@ -209,7 +218,7 @@ async fn background_task( { let relay_task_name = on_demand_headers_relay_name::(); let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::
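// Illustrative sketch of the recurring pattern in this refactor: components such as
// OnDemandHeadersRelay stop storing a concrete client struct and become generic over
// any type implementing a client trait for the relevant chain. Toy, self-contained
// types only; the real `Client` trait in relay-substrate-client is richer than this.

use std::fmt::Debug;
use std::marker::PhantomData;

// A chain is a marker type carrying its block-number type.
trait Chain {
    type BlockNumber: Copy + Debug;
}

// Minimal client trait: anything that can report the best block of chain `C`.
trait Client<C: Chain>: Clone {
    fn best_block_number(&self) -> C::BlockNumber;
}

// The relay component is generic over the client type instead of owning a concrete one.
struct HeadersRelaySketch<C: Chain, SourceClnt: Client<C>> {
    source_client: SourceClnt,
    _chain: PhantomData<C>,
}

impl<C: Chain, SourceClnt: Client<C>> HeadersRelaySketch<C, SourceClnt> {
    fn new(source_client: SourceClnt) -> Self {
        Self { source_client, _chain: PhantomData }
    }

    fn report(&self) {
        println!("best source block: {:?}", self.source_client.best_block_number());
    }
}

fn main() {
    struct TestChain;
    impl Chain for TestChain {
        type BlockNumber = u32;
    }

    #[derive(Clone)]
    struct TestClient;
    impl Client<TestChain> for TestClient {
        fn best_block_number(&self) -> u32 {
            42
        }
    }

    // Any client implementation can back the relay, e.g. a caching wrapper or a mock.
    HeadersRelaySketch::<TestChain, _>::new(TestClient).report();
}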
::new( + let mut finality_source = SubstrateFinalitySource::::new( source_client.clone(), Some(required_header_number.clone()), ); @@ -246,7 +255,8 @@ async fn background_task( // read best finalized source header number from target let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::
(&finality_target, &relay_task_name).await; + best_finalized_source_header_at_target::(&finality_target, &relay_task_name) + .await; if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { relay_utils::relay_loop::reconnect_failed_client( FailedClient::Target, @@ -410,13 +420,17 @@ async fn mandatory_headers_scan_range( /// it. /// /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource
, +async fn relay_mandatory_header_from_range( + finality_source: &SubstrateFinalitySource, required_header_number: &RequiredHeaderNumberRef, best_finalized_source_header_at_target: String, range: (BlockNumberOf, BlockNumberOf), relay_task_name: &str, -) -> Result { +) -> Result +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ // search for mandatory header first let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; @@ -451,10 +465,14 @@ async fn relay_mandatory_header_from_range( /// Read best finalized source block number from source client. /// /// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource
, +async fn best_finalized_source_header_at_source( + finality_source: &SubstrateFinalitySource, relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { +) -> Result, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { log::error!( target: "bridge", @@ -470,11 +488,16 @@ async fn best_finalized_source_header_at_source( - finality_target: &SubstrateFinalityTarget
, +async fn best_finalized_source_header_at_target( + finality_target: &SubstrateFinalityTarget, relay_task_name: &str, -) -> Result, as RelayClient>::Error> +) -> Result< + BlockNumberOf, + as RelayClient>::Error, +> where + P: SubstrateFinalitySyncPipeline, + TargetClnt: Client, AccountIdOf: From< as sp_core::Pair>::Public>, { finality_target @@ -496,10 +519,14 @@ where /// Read first mandatory header in given inclusive range. /// /// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource
, +async fn find_mandatory_header_in_range( + finality_source: &SubstrateFinalitySource, range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { +) -> Result>, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ let mut current = range.0; while current <= range.1 { let header = finality_source.client().header_by_number(current).await?; diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 966bdc3107203a61cf405adba2cf09124330954e..4579222a2c681c49e076f67d2eacaeb1dc8b9fca 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -17,7 +17,7 @@ //! On-demand Substrate -> Substrate parachain finality relay. use crate::{ - messages_source::best_finalized_peer_header_at_self, + messages::source::best_finalized_peer_header_at_self, on_demand::OnDemandRelay, parachains::{ source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter, @@ -53,29 +53,34 @@ use std::fmt::Debug; /// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers /// are relayed, on-demand stops syncing headers. #[derive(Clone)] -pub struct OnDemandParachainsRelay { +pub struct OnDemandParachainsRelay { /// Relay task name. relay_task_name: String, /// Channel used to communicate with background task and ask for relay of parachain heads. required_header_number_sender: Sender>, /// Source relay chain client. - source_relay_client: Client, + source_relay_client: SourceRelayClnt, /// Target chain client. - target_client: Client, + target_client: TargetClnt, /// On-demand relay chain relay. on_demand_source_relay_to_target_headers: Arc>, } -impl OnDemandParachainsRelay
{ +impl< + P: SubstrateParachainsPipeline, + SourceRelayClnt: Client, + TargetClnt: Client, + > OnDemandParachainsRelay +{ /// Create new on-demand parachains relay. /// /// Note that the argument is the source relay chain client, not the parachain client. /// That's because parachain finality is determined by the relay chain and we don't /// need to connect to the parachain itself here. pub fn new( - source_relay_client: Client, - target_client: Client, + source_relay_client: SourceRelayClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -114,10 +119,13 @@ impl OnDemandParachainsRelay
{ } #[async_trait] -impl OnDemandRelay - for OnDemandParachainsRelay
+impl + OnDemandRelay + for OnDemandParachainsRelay where P::SourceParachain: Chain, + SourceRelayClnt: Client, + TargetClnt: Client, { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -147,7 +155,7 @@ where required_parachain_header: BlockNumberOf, ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { // select headers to prove - let parachains_source = ParachainsSource::
::new( + let parachains_source = ParachainsSource::::new( self.source_relay_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -231,8 +239,8 @@ where /// Background task that is responsible for starting parachain headers relay. async fn background_task( - source_relay_client: Client, - target_client: Client, + source_relay_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -255,9 +263,11 @@ async fn background_task( let parachains_relay_task = futures::future::Fuse::terminated(); futures::pin_mut!(parachains_relay_task); - let mut parachains_source = - ParachainsSource::
::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = ParachainsTarget::
::new( + let mut parachains_source = ParachainsSource::::new( + source_relay_client.clone(), + required_para_header_ref.clone(), + ); + let mut parachains_target = ParachainsTarget::::new( source_relay_client.clone(), target_client.clone(), target_transaction_params.clone(), @@ -446,9 +456,9 @@ struct RelayData { } /// Read required data from source and target clients. -async fn read_relay_data( - source: &ParachainsSource
, - target: &ParachainsTarget
, +async fn read_relay_data( + source: &ParachainsSource, + target: &ParachainsTarget, required_header_number: BlockNumberOf, ) -> Result< RelayData< @@ -459,7 +469,9 @@ async fn read_relay_data( FailedClient, > where - ParachainsTarget
: + SourceRelayClnt: Client, + TargetClnt: Client, + ParachainsTarget: TargetClient> + RelayClient, { let map_target_err = |e| { @@ -642,13 +654,19 @@ trait SelectHeadersToProveEnvironment { } #[async_trait] -impl<'a, P: SubstrateParachainsPipeline> +impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> SelectHeadersToProveEnvironment< BlockNumberOf, HashOf, BlockNumberOf, HashOf, - > for (&'a OnDemandParachainsRelay
, &'a ParachainsSource
) + > + for ( + &'a OnDemandParachainsRelay, + &'a ParachainsSource, + ) where + SourceRelayClnt: Client, + TargetClnt: Client, { fn parachain_id(&self) -> ParaId { ParaId(P::SourceParachain::PARACHAIN_ID) @@ -663,9 +681,8 @@ impl<'a, P: SubstrateParachainsPipeline> async fn best_finalized_relay_block_at_target( &self, ) -> Result, SubstrateError> { - Ok(crate::messages_source::read_client_state::( + Ok(crate::messages::source::read_client_state::( &self.0.target_client, - None, ) .await? .best_finalized_peer_at_best_self diff --git a/bridges/relays/lib-substrate-relay/src/parachains/source.rs b/bridges/relays/lib-substrate-relay/src/parachains/source.rs index 4cc512b9d9b45c7334ffb121c1a8613b7f118550..1aa12d1c913d11e13e95908db44c6302942fa94a 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/source.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/source.rs @@ -16,8 +16,10 @@ //! Parachain heads source. -use crate::parachains::{ParachainsPipelineAdapter, SubstrateParachainsPipeline}; - +use crate::{ + parachains::{ParachainsPipelineAdapter, SubstrateParachainsPipeline}, + proofs::to_raw_storage_proof, +}; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; use bp_parachains::parachain_head_storage_key_at_source; @@ -37,22 +39,24 @@ pub type RequiredHeaderIdRef = Arc>>>; /// Substrate client as parachain heads source. #[derive(Clone)] -pub struct ParachainsSource { - client: Client, +pub struct ParachainsSource { + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, } -impl ParachainsSource
{ +impl> + ParachainsSource +{ /// Creates new parachains source client. pub fn new( - client: Client, + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, ) -> Self { ParachainsSource { client, max_head_id } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceRelayClnt { &self.client } @@ -64,8 +68,8 @@ impl ParachainsSource
{ let para_id = ParaId(P::SourceParachain::PARACHAIN_ID); let storage_key = parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, para_id); - let para_head = self.client.raw_storage_value(storage_key, Some(at_block.1)).await?; - let para_head = para_head.map(|h| ParaHead::decode(&mut &h.0[..])).transpose()?; + let para_head: Option = + self.client.storage_value(at_block.hash(), storage_key).await?; let para_head = match para_head { Some(para_head) => para_head, None => return Ok(None), @@ -76,7 +80,9 @@ impl ParachainsSource
{ } #[async_trait] -impl RelayClient for ParachainsSource
{ +impl> RelayClient + for ParachainsSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -85,8 +91,8 @@ impl RelayClient for ParachainsSource
{ } #[async_trait] -impl SourceClient> - for ParachainsSource
+impl> + SourceClient> for ParachainsSource where P::SourceParachain: Chain, { @@ -149,12 +155,9 @@ where let parachain = ParaId(P::SourceParachain::PARACHAIN_ID); let storage_key = parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, parachain); - let parachain_heads_proof = self - .client - .prove_storage(vec![storage_key.clone()], at_block.1) - .await? - .into_iter_nodes() - .collect(); + + let storage_proof = + self.client.prove_storage(at_block.hash(), vec![storage_key.clone()]).await?; // why we're reading parachain head here once again (it has already been read at the // `parachain_head`)? that's because `parachain_head` sometimes returns obsolete parachain @@ -165,10 +168,8 @@ where // rereading actual value here let parachain_head = self .client - .raw_storage_value(storage_key, Some(at_block.1)) + .storage_value::(at_block.hash(), storage_key) .await? - .map(|h| ParaHead::decode(&mut &h.0[..])) - .transpose()? .ok_or_else(|| { SubstrateError::Custom(format!( "Failed to read expected parachain {parachain:?} head at {at_block:?}" @@ -176,6 +177,11 @@ where })?; let parachain_head_hash = parachain_head.hash(); - Ok((ParaHeadsProof { storage_proof: parachain_heads_proof }, parachain_head_hash)) + Ok(( + ParaHeadsProof { + storage_proof: to_raw_storage_proof::(storage_proof), + }, + parachain_head_hash, + )) } } diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index 531d55b53223609c523d521f43a38336353c597f..f66b193340c1a5e243b3bca17a111ed20f422c37 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -42,31 +42,42 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; +use sp_runtime::traits::Header; /// Substrate client as parachain heads source. -pub struct ParachainsTarget { - source_client: Client, - target_client: Client, +pub struct ParachainsTarget { + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, } -impl ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > ParachainsTarget +{ /// Creates new parachains target client. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { ParachainsTarget { source_client, target_client, transaction_params } } /// Returns reference to the underlying RPC client. - pub fn target_client(&self) -> &Client { + pub fn target_client(&self) -> &TargetClnt { &self.target_client } } -impl Clone for ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Clone, + > Clone for ParachainsTarget +{ fn clone(&self) -> Self { ParachainsTarget { source_client: self.source_client.clone(), @@ -77,7 +88,12 @@ impl Clone for ParachainsTarget
{ } #[async_trait] -impl RelayClient for ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for ParachainsTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -88,14 +104,17 @@ impl RelayClient for ParachainsTarget
{ } #[async_trait] -impl
TargetClient> for ParachainsTarget
+impl TargetClient> + for ParachainsTarget where P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, P::SourceParachain: ChainBase, P::SourceRelayChain: ChainBase, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_block(&self) -> Result, Self::Error> { let best_header = self.target_client.best_header().await?; @@ -109,10 +128,10 @@ where at_block: &HeaderIdOf, ) -> Result, Self::Error> { self.target_client - .typed_state_call::<_, Option>>( + .state_call::<_, Option>>( + at_block.hash(), P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_block.1), ) .await? .map(Ok) @@ -124,7 +143,11 @@ where ) -> Result>, Self::Error> { Ok(self .target_client - .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) + .state_call( + self.target_client.best_header().await?.hash(), + P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), + (), + ) .await .unwrap_or_else(|e| { log::info!( @@ -151,7 +174,7 @@ where &P::SourceParachain::PARACHAIN_ID.into(), ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_info = match storage_value { Some(para_info) => para_info, None => return Ok(None), @@ -172,7 +195,7 @@ where ¶_info.best_head_hash.head_hash, ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_head_number = match storage_value { Some(para_head_data) => para_head_data.decode_parachain_head_data::()?.number, diff --git a/bridges/relays/messages/Cargo.toml b/bridges/relays/messages/Cargo.toml index 570e11c0da6feeaa7bbbbd76a845df51444a10cb..c7a132bb3bae7ebc34728de8c94c41fb39c89751 100644 --- a/bridges/relays/messages/Cargo.toml +++ b/bridges/relays/messages/Cargo.toml @@ -11,19 +11,18 @@ publish = false workspace = true [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -env_logger = "0.11" -futures = "0.3.30" -hex = "0.4" +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +hex = { workspace = true, default-features = true } log = { workspace = true } -num-traits = "0.2" -parking_lot = "0.12.1" +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } # Bridge Dependencies -bp-messages = { path = "../../primitives/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } +bp-messages = { workspace = true, default-features = true } +finality-relay = { workspace = true } +relay-utils = { workspace = true } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } +sp-arithmetic = { workspace = true, default-features = true } diff --git a/bridges/relays/parachains/Cargo.toml b/bridges/relays/parachains/Cargo.toml index 8d38e4e6bd07c2420adcf233729c1bac9bb77c37..ed03bdbb0f65e6f5e3b15c63f6ccd680c89e6626 100644 --- a/bridges/relays/parachains/Cargo.toml +++ b/bridges/relays/parachains/Cargo.toml @@ -11,18 +11,18 @@ publish = false workspace = true [dependencies] -async-std = "1.9.0" -async-trait = "0.1.79" -futures = "0.3.30" +async-std = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } 
log = { workspace = true } -relay-utils = { path = "../utils" } +relay-utils = { workspace = true } # Bridge dependencies -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -relay-substrate-client = { path = "../client-substrate" } +bp-polkadot-core = { workspace = true, default-features = true } +relay-substrate-client = { workspace = true } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } -sp-core = { path = "../../../substrate/primitives/core" } +codec = { workspace = true, default-features = true } +relay-substrate-client = { features = ["test-helpers"], workspace = true } +sp-core = { workspace = true, default-features = true } diff --git a/bridges/relays/parachains/src/parachains_loop.rs b/bridges/relays/parachains/src/parachains_loop.rs index fd73ca2d46c00f8e05bb05a14a7fa4104ef898c4..0fd1d72c7075bc29632280615f203e0c0028359b 100644 --- a/bridges/relays/parachains/src/parachains_loop.rs +++ b/bridges/relays/parachains/src/parachains_loop.rs @@ -680,7 +680,6 @@ impl SubmittedHeadsTracker
{ mod tests { use super::*; use async_std::sync::{Arc, Mutex}; - use codec::Encode; use futures::{SinkExt, StreamExt}; use relay_substrate_client::test_chain::{TestChain, TestParachain}; use relay_utils::{HeaderId, MaybeConnectionError}; @@ -821,8 +820,7 @@ mod tests { let head_result = SourceClient::::parachain_head(self, at_block).await?; let head = head_result.as_available().unwrap(); - let storage_proof = vec![head.hash().encode()]; - let proof = (ParaHeadsProof { storage_proof }, head.hash()); + let proof = (ParaHeadsProof { storage_proof: Default::default() }, head.hash()); self.data.lock().await.source_proof.clone().map(|_| proof) } } diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index 4765730a0b4f9906fd163600a7937f8d6aad661f..93e42763967b90c32409dcacbac22486a09a9892 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -11,29 +11,29 @@ publish = false workspace = true [dependencies] -ansi_term = "0.12" -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.79" -backoff = "0.4" -isahc = "1.2" -env_logger = "0.11.3" -futures = "0.3.30" -jsonpath_lib = "0.3" +ansi_term = { workspace = true } +anyhow = { workspace = true } +async-std = { workspace = true } +async-trait = { workspace = true } +backoff = { workspace = true } +isahc = { workspace = true } +env_logger = { workspace = true } +futures = { workspace = true } +jsonpath_lib = { workspace = true } log = { workspace = true } -num-traits = "0.2" -parking_lot = "0.12.1" +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sysinfo = "0.30" -time = { version = "0.3", features = ["formatting", "local-offset", "std"] } -tokio = { version = "1.37", features = ["rt"] } +sysinfo = { workspace = true } +time = { features = ["formatting", "local-offset", "std"], workspace = true } +tokio = { features = ["rt"], workspace = true, default-features = true } thiserror = { workspace = true } # Bridge dependencies -bp-runtime = { path = "../../primitives/runtime" } +bp-runtime = { workspace = true, default-features = true } # Substrate dependencies -sp-runtime = { path = "../../../substrate/primitives/runtime" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index cab2b06b0931e6d28df83434b3319c8384bd3679..666ac3fbc8a2ab32b485c088c37b536e751cdd46 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -17,34 +17,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } serde_json = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = 
true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { optional = true, workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } -snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -static_assertions = { version = "1.1.0", default-features = false } -pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false, optional = true } +snowbridge-core = { workspace = true } +snowbridge-ethereum = { workspace = true } +snowbridge-pallet-ethereum-client-fixtures = { optional = true, workspace = true } +snowbridge-beacon-primitives = { workspace = true } +static_assertions = { workspace = true } +pallet-timestamp = { optional = true, workspace = true } [dev-dependencies] -rand = "0.8.5" -sp-keyring = { path = "../../../../substrate/primitives/keyring" } +rand = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -hex-literal = "0.4.1" -pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } -snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures" } -sp-io = { path = "../../../../substrate/primitives/io" } +hex-literal = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client-fixtures = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } [features] diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml index 858e2513a961288dd24c47e7d57ada1506b212d2..bd4176875733f64f61026e165a2a03b221193bad 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml @@ -15,11 +15,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = { version = "0.4.1" } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } -snowbridge-beacon-primitives = { path = "../../../primitives/beacon", default-features = false } +hex-literal = { 
workspace = true, default-features = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index d63398770f207051ebb5adb72f4f574c767e8770..1b08bb39b4346a76e58c0a695b4c71126bd40510 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -16,35 +16,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -alloy-primitives = { version = "0.4.2", default-features = false, features = ["rlp"] } -alloy-sol-types = { version = "0.4.2", default-features = false } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } -snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "fixtures", default-features = false, optional = true } +snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { optional = true, workspace = true } [dev-dependencies] -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } 
-snowbridge-pallet-ethereum-client = { path = "../ethereum-client" } -hex-literal = { version = "0.4.1" } +frame-benchmarking = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index e84246fb5a551b2512c4e00cb4ca00171e8c3f75..b66b57c3620ad5488d58b90a8f337ecd88f07e5d 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -15,11 +15,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = { version = "0.4.1" } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } -snowbridge-beacon-primitives = { path = "../../../primitives/beacon", default-features = false } +hex-literal = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index a842f9aa60cb9ae500c9e8a0079006da7c1bf5d0..a031676c6076ad417a9d0fd1f060e9387bb99eea 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -53,20 +53,11 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 15c6c3a5b32b0fc2bd1a95fd842bab78f07a697a..78546e258daa30e966ddef1ed48c35cebcc17d65 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -16,27 +16,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } -bridge-hub-common = { path = "../../../../cumulus/parachains/runtimes/bridge-hubs/common", default-features = false } +bridge-hub-common = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -snowbridge-outbound-queue-merkle-tree = { path = "merkle-tree", default-features = false } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +snowbridge-core = { features = ["serde"], workspace = true } +snowbridge-outbound-queue-merkle-tree = { workspace = true } +ethabi = { workspace = true } [dev-dependencies] -pallet-message-queue = { path = "../../../../substrate/frame/message-queue", default-features = false } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } +pallet-message-queue = { workspace = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 1b1a9905928f8b5ea8eaccc15d18813f87406494..00cc700fbe832cea4e77ddabe3a148c42bd30bef 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -15,18 +15,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-core = { 
path = "../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -hex-literal = { version = "0.4.1" } -env_logger = "0.11" -hex = "0.4" -array-bytes = "6.2.2" -sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } +hex-literal = { workspace = true, default-features = true } +env_logger = { workspace = true } +hex = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index b8d704f1cb92d570ea8e8b06cd00410bea7746bb..d35bdde5a81e7a80a228efff19c3c1de0eceefef 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", features = ["derive"], default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -snowbridge-outbound-queue-merkle-tree = { path = "../merkle-tree", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +frame-support = { workspace = true } +snowbridge-outbound-queue-merkle-tree = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index 5bbbb1d9310da4c3617ec4b03ea63620c30feb20..f1e749afb9977c440ba7bbfa55c8a8acbc8c0cda 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -15,33 +15,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = 
"../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-core = { workspace = true } [dev-dependencies] -hex = "0.4.1" -hex-literal = { version = "0.4.1" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -polkadot-primitives = { path = "../../../../polkadot/primitives" } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue" } -snowbridge-pallet-outbound-queue = { path = "../outbound-queue" } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +snowbridge-pallet-outbound-queue = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 42df5edfb7b2d4e5abaf0e30850ecbd3ebd04b98..7c524dd2edadb6132be50616f7e6855ad463c8b4 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } +], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +xcm = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index d7fc4152b371025687d2a36cbd49e628c88205fc..98bd3da9ab27c974a1a85abbb208064e41f89991 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -112,20 +112,11 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_xcm_origin::Config for 
Test { diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 18123910c35b2e198ec03ca1aa01aef1ea0d96ca..9ced99fbf3fdddd8f64877606f957da14c70f608 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -13,26 +13,26 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -hex = { version = "0.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -rlp = { version = "0.5", default-features = false } +hex = { workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +rlp = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } -ssz_rs = { version = "0.9.0", default-features = false } -ssz_rs_derive = { version = "0.9.0", default-features = false } -byte-slice-cast = { version = "1.2.1", default-features = false } +ssz_rs = { workspace = true } +ssz_rs_derive = { workspace = true } +byte-slice-cast = { workspace = true } -snowbridge-ethereum = { path = "../ethereum", default-features = false } -milagro-bls = { package = "snowbridge-milagro-bls", version = "1.5.4", default-features = false } +snowbridge-ethereum = { workspace = true } +milagro-bls = { workspace = true } [dev-dependencies] -hex-literal = { version = "0.4.1" } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 573ab6608e5f91c0333f5ee7288cb679d6c38fb6..f9bee1ff4959ae56f73b50d4c91c2ea2e63bb0a6 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -13,28 +13,28 @@ workspace = true [dependencies] serde = { optional = true, features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path 
= "../../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } -snowbridge-beacon-primitives = { path = "../beacon", default-features = false } +snowbridge-beacon-primitives = { workspace = true } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +ethabi = { workspace = true } [dev-dependencies] -hex = { version = "0.4.3" } +hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index fb0b6cbaf3c2fba82c709fbc84ca565c53e7505e..764ce90b8139d936d16e38a2f337c05004427675 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -14,23 +14,23 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } serde-big-array = { optional = true, features = ["const-generics"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -ethbloom = { version = "0.13.0", default-features = false } -ethereum-types = { version = "0.14.1", default-features = false, features = ["codec", "rlp", "serialize"] } -hex-literal = { version = "0.4.1", default-features = false } -parity-bytes = { version = "0.1.2", default-features = false } -rlp = { version = "0.5.2", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +ethbloom = { workspace = true } +ethereum-types = { features = ["codec", "rlp", "serialize"], workspace = true } +hex-literal = { workspace = true } +parity-bytes = { workspace = true } +rlp = { workspace = true } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-io = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +ethabi = { workspace = true } [dev-dependencies] -wasm-bindgen-test = "0.3.19" -rand = "0.8.5" +wasm-bindgen-test = { workspace = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } [features] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index ec0888dd41b0cd197b49efda9f83e188916fb8fa..ee8d481cec12ae07d107c0463b778345fcfcecee 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ 
b/bridges/snowbridge/primitives/router/Cargo.toml @@ -12,25 +12,24 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../core", default-features = false } +snowbridge-core = { workspace = true } -hex-literal = { version = "0.4.1" } +hex-literal = { workspace = true, default-features = true } [dev-dependencies] -rustc-hex = { version = "2.1.0" } [features] default = ["std"] diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index 2372908b86ab5134f4ea0f8373ffe00cbcc2bd32..d47cb3cb7101fb54d7bdde854ff73e628555e86b 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +codec = { workspace = true } +frame-support = { workspace = true } +sp-std = { workspace = true } +sp-arithmetic = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-core = { workspace = true } [dev-dependencies] diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index e19c682de4542994e19e20d0c194598fc8009db5..6f8e586bf5ff12e7d870df95ffc6a0f02461357b 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -11,38 +11,38 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] 
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue", default-features = false } -pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false } -pallet-utility = { path = "../../../../substrate/frame/utility", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false } -pallet-collator-selection = { path = "../../../../cumulus/pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../../cumulus/parachains/pallets/parachain-info", default-features = false } -parachains-runtimes-test-utils = { path = "../../../../cumulus/parachains/runtimes/test-utils", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Ethereum Bridge (Snowbridge) -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-pallet-ethereum-client = { path = "../../pallets/ethereum-client", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "../../pallets/ethereum-client/fixtures", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../pallets/outbound-queue", default-features = false } -snowbridge-pallet-system = { path = "../../pallets/system", default-features = false } +snowbridge-core = { workspace = true } +snowbridge-pallet-ethereum-client = { workspace = true } +snowbridge-pallet-ethereum-client-fixtures = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-system = { workspace = true } 
[features] default = ["std"] diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json index b2dddaa19ed1561b98422b3a28f6777308b3ba47..ca3abcc528cfaabc19cebf47426ef62f4ea5a8a6 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json @@ -736,9 +736,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 410ac8b983d96f0a38633ac0199208a4e249e49b..9b6f6b73960b416c481b43f053477c70e55b8495 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -10,15 +10,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -url = "2.4.0" +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +url = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-service = { path = "../../../substrate/client/service" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 39cedf87a0cb1b6fb8296c1a3bdec1483170af38..6ebde0c2c653b8279ead203bdabeafd3ab8292e1 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -10,41 +10,41 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -tracing = "0.1.25" +parking_lot = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +sc-client-api = { workspace = true, 
default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../consensus/common" } -cumulus-client-network = { path = "../network" } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-network = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1.79" +async-trait = { workspace = true } # Substrate -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } # Polkadot -polkadot-node-subsystem-test-helpers = { path = "../../../polkadot/node/subsystem-test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } # Cumulus -cumulus-test-client = { path = "../../test/client" } -cumulus-test-runtime = { path = "../../test/runtime" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } +cumulus-test-client = { workspace = true } +cumulus-test-runtime = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index fad30e59e869d3a4144b33cdd34c6b73990d7e9f..01e07cb395a955dfe3016aef3c8bd3ac5e2be7c9 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -10,44 +10,47 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -tracing = "0.1.37" -schnellru = "0.2.1" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true } +tracing = { workspace = true, default-features = true } +schnellru = { workspace = true } +tokio = { workspace = true, features = ["macros"] } # Substrate -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../../substrate/client/consensus/aura" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sc-consensus-slots = { path = 
"../../../../substrate/client/consensus/slots" } -sc-telemetry = { path = "../../../../substrate/client/telemetry" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-block-builder = { path = "../../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../common" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } -cumulus-client-consensus-proposer = { path = "../proposer" } -cumulus-client-parachain-inherent = { path = "../../parachain-inherent" } -cumulus-primitives-aura = { path = "../../../primitives/aura" } -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-client-collator = { path = "../../collator" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-aura = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../polkadot/primitives" } -polkadot-node-primitives = { path = 
"../../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../../polkadot/node/overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 776052215d9397c529699ed07040819f666e16b5..dc830e463a4f5bca1f39ec82a11d5364b148c675 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -156,15 +156,8 @@ where Ok((paras_inherent_data, other_inherent_data)) } - /// Propose, seal, and import a block, packaging it into a collation. - /// - /// Provide the slot to build at as well as any other necessary pre-digest logs, - /// the inherent data, and the proposal duration and PoV size limits. - /// - /// The Aura pre-digest should not be explicitly provided and is set internally. - /// - /// This does not announce the collation to the parachain network or the relay chain. - pub async fn collate( + /// Build and import a parachain block on the given parent header, using the given slot claim. + pub async fn build_block_and_import( &mut self, parent_header: &Block::Header, slot_claim: &SlotClaim, @@ -172,10 +165,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, - ) -> Result< - Option<(Collation, ParachainBlockData, Block::Hash)>, - Box, - > { + ) -> Result>, Box> { let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); @@ -205,7 +195,6 @@ where ) .map_err(|e| e as Box)?; - let post_hash = sealed_importable.post_hash(); let block = Block::new( sealed_importable.post_header(), sealed_importable @@ -220,11 +209,46 @@ where .map_err(|e| Box::new(e) as Box) .await?; - if let Some((collation, block_data)) = self.collator_service.build_collation( - parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { + Ok(Some(ParachainCandidate { block, proof: proposal.proof })) + } + + /// Propose, seal, import a block and packaging it into a collation. + /// + /// Provide the slot to build at as well as any other necessary pre-digest logs, + /// the inherent data, and the proposal duration and PoV size limits. + /// + /// The Aura pre-digest should not be explicitly provided and is set internally. + /// + /// This does not announce the collation to the parachain network or the relay chain. 
+ pub async fn collate( + &mut self, + parent_header: &Block::Header, + slot_claim: &SlotClaim, + additional_pre_digest: impl Into>>, + inherent_data: (ParachainInherentData, InherentData), + proposal_duration: Duration, + max_pov_size: usize, + ) -> Result< + Option<(Collation, ParachainBlockData, Block::Hash)>, + Box, + > { + let maybe_candidate = self + .build_block_and_import( + parent_header, + slot_claim, + additional_pre_digest, + inherent_data, + proposal_duration, + max_pov_size, + ) + .await?; + + let Some(candidate) = maybe_candidate else { return Ok(None) }; + + let hash = candidate.block.header().hash(); + if let Some((collation, block_data)) = + self.collator_service.build_collation(parent_header, hash, candidate) + { tracing::info!( target: crate::LOG_TARGET, "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", @@ -241,7 +265,7 @@ where ); } - Ok(Some((collation, block_data, post_hash))) + Ok(Some((collation, block_data, hash))) } else { Err(Box::::from("Unable to produce collation") as Box) diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 1047c6219ad132403014cacaf3d071d8009b9dbc..4efd50a04ec6ec654ce7b32ac17eb07d12df3d6c 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -41,7 +41,6 @@ use sc_consensus::BlockImport; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; @@ -53,7 +52,7 @@ use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -64,8 +63,6 @@ pub struct Params { pub para_client: Arc, /// A handle to the relay-chain client. pub relay_client: RClient, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -89,8 +86,8 @@ pub struct Params { } /// Run bare Aura consensus as a relay-chain-driven collator. 
-pub fn run( - params: Params, +pub fn run( + params: Params, ) -> impl Future + Send + 'static where Block: BlockT + Send, @@ -108,7 +105,6 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, P: Pair, diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 09416233ea9b39dfd4bd4126149d51f922d7b6e4..749b131123949de9a00801558017b6637e48380d 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -33,46 +33,34 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ - self as consensus_common, load_abridged_host_configuration, ParachainBlockImportMarker, - ParentSearchParams, -}; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, -}; +use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::SubmitCollationParams; -use polkadot_node_subsystem::messages::{ - CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, -}; +use polkadot_node_subsystem::messages::CollationGenerationMessage; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{ - AsyncBackingParams, CollatorPair, CoreIndex, CoreState, Id as ParaId, OccupiedCoreAssumption, -}; +use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; -use futures::{channel::oneshot, prelude::*}; +use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; -use sc_consensus_aura::standalone as aura_internal; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::{AuraApi, Slot}; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; -use sp_timestamp::Timestamp; use std::{sync::Arc, time::Duration}; -use crate::collator::{self as collator_util, SlotClaim}; +use crate::collator::{self as collator_util}; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -87,8 +75,6 @@ pub struct Params { pub relay_client: RClient, /// A validation code hash provider, used to get the current validation code hash. pub code_hash_provider: CHP, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -110,8 +96,8 @@ pub struct Params { } /// Run async-backing-friendly Aura. 
-pub fn run( - mut params: Params, +pub fn run( + mut params: Params, ) -> impl Future + Send + 'static where Block: BlockT, @@ -130,7 +116,6 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, @@ -138,14 +123,6 @@ where P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, { - // This is an arbitrary value which is likely guaranteed to exceed any reasonable - // limit, as it would correspond to 10 non-included blocks. - // - // Since we only search for parent blocks which have already been imported, - // we can guarantee that all imported blocks respect the unincluded segment - // rules specified by the parachain's runtime and thus will never be too deep. - const PARENT_SEARCH_DEPTH: usize = 10; - async move { cumulus_client_collator::initialize_collator_subsystems( &mut params.overseer_handle, @@ -186,12 +163,9 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - // TODO: Currently we use just the first core here, but for elastic scaling - // we iterate and build on all of the cores returned. - let core_index = if let Some(core_index) = cores_scheduled_for_para( + let core_index = if let Some(core_index) = super::cores_scheduled_for_para( relay_parent, params.para_id, - &mut params.overseer_handle, &mut params.relay_client, ) .await @@ -226,42 +200,16 @@ where }, }; - let parent_search_params = ParentSearchParams { + let (included_block, initial_parent) = match crate::collators::find_parent( relay_parent, - para_id: params.para_id, - ancestry_lookback: async_backing_params(relay_parent, ¶ms.relay_client) - .await - .map(|c| c.allowed_ancestry_len as usize) - .unwrap_or(0), - max_depth: PARENT_SEARCH_DEPTH, - ignore_alternative_branches: true, - }; - - let potential_parents = - cumulus_client_consensus_common::find_potential_parents::( - parent_search_params, - &*params.para_backend, - ¶ms.relay_client, - ) - .await; - - let mut potential_parents = match potential_parents { - Err(e) => { - tracing::error!( - target: crate::LOG_TARGET, - ?relay_parent, - err = ?e, - "Could not fetch potential parents to build upon" - ); - - continue - }, - Ok(x) => x, - }; - - let included_block = match potential_parents.iter().find(|x| x.depth == 0) { - None => continue, // also serves as an `is_empty` check. - Some(b) => b.hash, + params.para_id, + &*params.para_backend, + ¶ms.relay_client, + ) + .await + { + Some(value) => value, + None => continue, }; let para_client = &*params.para_client; @@ -292,7 +240,7 @@ where relay_chain_slot_duration = ?params.relay_chain_slot_duration, "Adjusted relay-chain slot to parachain slot" ); - Some(can_build_upon::<_, _, P>( + Some(super::can_build_upon::<_, _, P>( slot_now, timestamp, block_hash, @@ -302,13 +250,6 @@ where )) }; - // Sort by depth, ascending, to choose the longest chain. - // - // If the longest chain has space, build upon that. Otherwise, don't - // build at all. - potential_parents.sort_by_key(|a| a.depth); - let Some(initial_parent) = potential_parents.pop() else { continue }; - // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. 
let mut parent_hash = initial_parent.hash; @@ -363,13 +304,11 @@ where Ok(x) => x, }; - let validation_code_hash = match params.code_hash_provider.code_hash_at(parent_hash) - { - None => { - tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); - break - }, - Some(v) => v, + let Some(validation_code_hash) = + params.code_hash_provider.code_hash_at(parent_hash) + else { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break }; super::check_validation_code_or_log( @@ -437,124 +376,3 @@ where } } } - -// Checks if we own the slot at the given block and whether there -// is space in the unincluded segment. -async fn can_build_upon( - slot: Slot, - timestamp: Timestamp, - parent_hash: Block::Hash, - included_block: Block::Hash, - client: &Client, - keystore: &KeystorePtr, -) -> Option> -where - Client: ProvideRuntimeApi, - Client::Api: AuraApi + AuraUnincludedSegmentApi, - P: Pair, - P::Public: Codec, - P::Signature: Codec, -{ - let runtime_api = client.runtime_api(); - let authorities = runtime_api.authorities(parent_hash).ok()?; - let author_pub = aura_internal::claim_slot::
<P>
(slot, &authorities, keystore).await?; - - // Here we lean on the property that building on an empty unincluded segment must always - // be legal. Skipping the runtime API query here allows us to seamlessly run this - // collator against chains which have not yet upgraded their runtime. - if parent_hash != included_block { - if !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? { - return None - } - } - - Some(SlotClaim::unchecked::
<P>
(author_pub, slot, timestamp)) -} - -/// Reads async backing parameters from the relay chain storage at the given relay parent. -async fn async_backing_params( - relay_parent: PHash, - relay_client: &impl RelayChainInterface, -) -> Option { - match load_abridged_host_configuration(relay_parent, relay_client).await { - Ok(Some(config)) => Some(config.async_backing_params), - Ok(None) => { - tracing::error!( - target: crate::LOG_TARGET, - "Active config is missing in relay chain storage", - ); - None - }, - Err(err) => { - tracing::error!( - target: crate::LOG_TARGET, - ?err, - ?relay_parent, - "Failed to read active config from relay chain client", - ); - None - }, - } -} - -// Return all the cores assigned to the para at the provided relay parent. -async fn cores_scheduled_for_para( - relay_parent: PHash, - para_id: ParaId, - overseer_handle: &mut OverseerHandle, - relay_client: &impl RelayChainInterface, -) -> Vec { - // Get `AvailabilityCores` from runtime - let (tx, rx) = oneshot::channel(); - let request = RuntimeApiRequest::AvailabilityCores(tx); - overseer_handle - .send_msg(RuntimeApiMessage::Request(relay_parent, request), "LookaheadCollator") - .await; - - let cores = match rx.await { - Ok(Ok(cores)) => cores, - Ok(Err(error)) => { - tracing::error!( - target: crate::LOG_TARGET, - ?error, - ?relay_parent, - "Failed to query availability cores runtime API", - ); - return Vec::new() - }, - Err(oneshot::Canceled) => { - tracing::error!( - target: crate::LOG_TARGET, - ?relay_parent, - "Sender for availability cores runtime request dropped", - ); - return Vec::new() - }, - }; - - let max_candidate_depth = async_backing_params(relay_parent, relay_client) - .await - .map(|c| c.max_candidate_depth) - .unwrap_or(0); - - cores - .iter() - .enumerate() - .filter_map(|(index, core)| { - let core_para_id = match core { - CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), - CoreState::Occupied(occupied_core) if max_candidate_depth >= 1 => occupied_core - .next_up_on_available - .as_ref() - .map(|scheduled_core| scheduled_core.para_id), - CoreState::Free | CoreState::Occupied(_) => None, - }; - - if core_para_id == Some(para_id) { - Some(CoreIndex(index as u32)) - } else { - None - } - }) - .collect() -} diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 6e0067d0cedb602face8943737f99f3cb1a201a3..7d430ecdc727ab2c44b84bec509938a22da4c3f8 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -20,13 +20,35 @@ //! included parachain block, as well as the [`lookahead`] collator, which prospectively //! builds on parachain blocks which have not yet been included in the relay chain. 
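This `mod.rs` hunk hoists helpers that were previously private to `lookahead.rs` (`async_backing_params`, `cores_scheduled_for_para`, `can_build_upon`, `find_parent`, plus `PARENT_SEARCH_DEPTH`) into the shared `collators` module and adds the new `slot_based` module, so the lookahead and slot-based collators can reuse them. Below is a minimal, dependency-free sketch of the core-filtering rule that `cores_scheduled_for_para` implements; the types here are simplified stand-ins, not the real `polkadot-primitives` definitions.

// Simplified stand-ins, for illustration only.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ParaId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct CoreIndex(u32);

enum CoreState {
    Free,
    // A core scheduled for a parachain.
    Scheduled(ParaId),
    // An occupied core, possibly with a para queued up next.
    Occupied { next_up_on_available: Option<ParaId> },
}

// Scheduled cores always count; occupied cores only count via `next_up_on_available`
// when async backing allows a candidate depth greater than zero.
fn cores_for_para(cores: &[CoreState], para_id: ParaId, max_candidate_depth: u32) -> Vec<CoreIndex> {
    cores
        .iter()
        .enumerate()
        .filter_map(|(index, core)| {
            let core_para_id = match core {
                CoreState::Scheduled(id) => Some(*id),
                CoreState::Occupied { next_up_on_available } if max_candidate_depth > 0 =>
                    *next_up_on_available,
                CoreState::Free | CoreState::Occupied { .. } => None,
            };
            (core_para_id == Some(para_id)).then(|| CoreIndex(index as u32))
        })
        .collect()
}

fn main() {
    let cores = [
        CoreState::Scheduled(ParaId(2000)),
        CoreState::Occupied { next_up_on_available: Some(ParaId(2000)) },
        CoreState::Free,
    ];
    // With async backing (depth > 0) both cores are usable; without it only the first.
    assert_eq!(cores_for_para(&cores, ParaId(2000), 3), vec![CoreIndex(0), CoreIndex(1)]);
    assert_eq!(cores_for_para(&cores, ParaId(2000), 0), vec![CoreIndex(0)]);
}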
+use crate::collator::SlotClaim; +use codec::Codec; +use cumulus_client_consensus_common::{ + self as consensus_common, load_abridged_host_configuration, ParentSearchParams, +}; +use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; +use cumulus_primitives_core::{relay_chain::Hash as ParaHash, BlockT}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_primitives::{ - Hash as RHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, + AsyncBackingParams, CoreIndex, CoreState, Hash as RelayHash, Id as ParaId, + OccupiedCoreAssumption, ValidationCodeHash, }; +use sc_consensus_aura::{standalone as aura_internal, AuraApi}; +use sp_api::ProvideRuntimeApi; +use sp_core::Pair; +use sp_keystore::KeystorePtr; +use sp_timestamp::Timestamp; pub mod basic; pub mod lookahead; +pub mod slot_based; + +// This is an arbitrary value which is likely guaranteed to exceed any reasonable +// limit, as it would correspond to 10 non-included blocks. +// +// Since we only search for parent blocks which have already been imported, +// we can guarantee that all imported blocks respect the unincluded segment +// rules specified by the parachain's runtime and thus will never be too deep. +const PARENT_SEARCH_DEPTH: usize = 10; /// Check the `local_validation_code_hash` against the validation code hash in the relay chain /// state. @@ -36,7 +58,7 @@ async fn check_validation_code_or_log( local_validation_code_hash: &ValidationCodeHash, para_id: ParaId, relay_client: &impl RelayChainInterface, - relay_parent: RHash, + relay_parent: RelayHash, ) { let state_validation_code_hash = match relay_client .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included) @@ -64,7 +86,7 @@ async fn check_validation_code_or_log( ?relay_parent, ?local_validation_code_hash, relay_validation_code_hash = ?state, - "Parachain code doesn't match validation code stored in the relay chain state", + "Parachain code doesn't match validation code stored in the relay chain state.", ); }, None => { @@ -77,3 +99,159 @@ async fn check_validation_code_or_log( }, } } + +/// Reads async backing parameters from the relay chain storage at the given relay parent. +async fn async_backing_params( + relay_parent: RelayHash, + relay_client: &impl RelayChainInterface, +) -> Option { + match load_abridged_host_configuration(relay_parent, relay_client).await { + Ok(Some(config)) => Some(config.async_backing_params), + Ok(None) => { + tracing::error!( + target: crate::LOG_TARGET, + "Active config is missing in relay chain storage", + ); + None + }, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + ?relay_parent, + "Failed to read active config from relay chain client", + ); + None + }, + } +} + +// Return all the cores assigned to the para at the provided relay parent. 
+async fn cores_scheduled_for_para( + relay_parent: RelayHash, + para_id: ParaId, + relay_client: &impl RelayChainInterface, +) -> Vec { + // Get `AvailabilityCores` from runtime + let cores = match relay_client.availability_cores(relay_parent).await { + Ok(cores) => cores, + Err(error) => { + tracing::error!( + target: crate::LOG_TARGET, + ?error, + ?relay_parent, + "Failed to query availability cores runtime API", + ); + return Vec::new() + }, + }; + + let max_candidate_depth = async_backing_params(relay_parent, relay_client) + .await + .map(|c| c.max_candidate_depth) + .unwrap_or(0); + + cores + .iter() + .enumerate() + .filter_map(|(index, core)| { + let core_para_id = match core { + CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), + CoreState::Occupied(occupied_core) if max_candidate_depth > 0 => occupied_core + .next_up_on_available + .as_ref() + .map(|scheduled_core| scheduled_core.para_id), + CoreState::Free | CoreState::Occupied(_) => None, + }; + + if core_para_id == Some(para_id) { + Some(CoreIndex(index as u32)) + } else { + None + } + }) + .collect() +} + +// Checks if we own the slot at the given block and whether there +// is space in the unincluded segment. +async fn can_build_upon( + slot: Slot, + timestamp: Timestamp, + parent_hash: Block::Hash, + included_block: Block::Hash, + client: &Client, + keystore: &KeystorePtr, +) -> Option> +where + Client: ProvideRuntimeApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi, + P: Pair, + P::Public: Codec, + P::Signature: Codec, +{ + let runtime_api = client.runtime_api(); + let authorities = runtime_api.authorities(parent_hash).ok()?; + let author_pub = aura_internal::claim_slot::
<P>
(slot, &authorities, keystore).await?; + + // Here we lean on the property that building on an empty unincluded segment must always + // be legal. Skipping the runtime API query here allows us to seamlessly run this + // collator against chains which have not yet upgraded their runtime. + if parent_hash != included_block && + !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? + { + return None + } + + Some(SlotClaim::unchecked::
<P>
(author_pub, slot, timestamp)) +} + +/// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that +/// we can build on. Once a list of potential parents is retrieved, return the last one of the +/// longest chain. +async fn find_parent( + relay_parent: ParaHash, + para_id: ParaId, + para_backend: &impl sc_client_api::Backend, + relay_client: &impl RelayChainInterface, +) -> Option<(::Hash, consensus_common::PotentialParent)> +where + Block: BlockT, +{ + let parent_search_params = ParentSearchParams { + relay_parent, + para_id, + ancestry_lookback: crate::collators::async_backing_params(relay_parent, relay_client) + .await + .map_or(0, |params| params.allowed_ancestry_len as usize), + max_depth: PARENT_SEARCH_DEPTH, + ignore_alternative_branches: true, + }; + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + para_backend, + relay_client, + ) + .await; + + let potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + return None + }, + Ok(x) => x, + }; + + let included_block = potential_parents.iter().find(|x| x.depth == 0)?.hash; + potential_parents + .into_iter() + .max_by_key(|a| a.depth) + .map(|parent| (included_block, parent)) +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs new file mode 100644 index 0000000000000000000000000000000000000000..1fbc0689da862999367a0c4a9bda59ed3d6525af --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -0,0 +1,491 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
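The `find_parent` helper above reduces the list returned by `find_potential_parents` to two values: the hash of the entry at depth 0 (the included block) and the deepest entry, i.e. the tip of the longest already-imported chain. A minimal standalone sketch of that selection rule, using an illustrative stand-in struct rather than the real `PotentialParent`:

// Illustrative stand-in; only the fields the selection rule needs.
#[derive(Clone, Debug, PartialEq)]
struct Parent {
    hash: u64,
    depth: usize,
}

// Depth 0 is the included block; building happens on the deepest potential parent.
fn select_parent(potential: Vec<Parent>) -> Option<(u64, Parent)> {
    let included = potential.iter().find(|p| p.depth == 0)?.hash;
    potential.into_iter().max_by_key(|p| p.depth).map(|parent| (included, parent))
}

fn main() {
    let parents = vec![
        Parent { hash: 0xA, depth: 0 },
        Parent { hash: 0xB, depth: 1 },
        Parent { hash: 0xC, depth: 2 },
    ];
    assert_eq!(select_parent(parents), Some((0xA, Parent { hash: 0xC, depth: 2 })));
    // Without a depth-0 entry there is no included block to anchor on, so nothing is returned.
    assert_eq!(select_parent(vec![Parent { hash: 0xB, depth: 1 }]), None);
}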
+ +use codec::{Codec, Encode}; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_primitives::{ + BlockId, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, + OccupiedCoreAssumption, +}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_timestamp::Timestamp; +use std::{sync::Arc, time::Duration}; + +use super::CollatorMessage; +use crate::{ + collator::{self as collator_util}, + collators::{check_validation_code_or_log, cores_scheduled_for_para}, + LOG_TARGET, +}; + +/// Parameters for [`run_block_builder`]. +pub struct BuilderTaskParams< + Block: BlockT, + BI, + CIDP, + Client, + Backend, + RelayClient, + CHP, + Proposer, + CS, +> { + /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RelayClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The para's ID. + pub para_id: ParaId, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Channel to send built blocks to the collation task. + pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>, + /// Slot duration of the relay chain + pub relay_chain_slot_duration: Duration, + /// Drift every slot by this duration. + /// This is a time quantity that is subtracted from the actual timestamp when computing + /// the time left to enter a new slot. In practice, this *left-shifts* the clock time with the + /// intent to keep our "clock" slightly behind the relay chain one and thus reducing the + /// likelihood of encountering unfavorable notification arrival timings (i.e. we don't want to + /// wait for relay chain notifications because we woke up too early). 
+ pub slot_drift: Duration, +} + +#[derive(Debug)] +struct SlotInfo { + pub timestamp: Timestamp, + pub slot: Slot, + pub slot_duration: SlotDuration, +} + +#[derive(Debug)] +struct SlotTimer { + client: Arc, + drift: Duration, + _marker: std::marker::PhantomData<(Block, Box)>, +} + +/// Returns current duration since Unix epoch. +fn duration_now() -> Duration { + use std::time::SystemTime; + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| { + panic!("Current time {:?} is before Unix epoch. Something is wrong: {:?}", now, e) + }) +} + +/// Returns the duration until the next slot from now. +fn time_until_next_slot(slot_duration: Duration, drift: Duration) -> Duration { + let now = duration_now().as_millis() - drift.as_millis(); + + let next_slot = (now + slot_duration.as_millis()) / slot_duration.as_millis(); + let remaining_millis = next_slot * slot_duration.as_millis() - now; + Duration::from_millis(remaining_millis as u64) +} + +impl SlotTimer +where + Block: BlockT, + Client: ProvideRuntimeApi + Send + Sync + 'static + UsageProvider, + Client::Api: AuraApi, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + pub fn new_with_drift(client: Arc, drift: Duration) -> Self { + Self { client, drift, _marker: Default::default() } + } + + /// Returns a future that resolves when the next slot arrives. + pub async fn wait_until_next_slot(&self) -> Result { + let Ok(slot_duration) = crate::slot_duration(&*self.client) else { + tracing::error!(target: crate::LOG_TARGET, "Failed to fetch slot duration from runtime."); + return Err(()) + }; + + let time_until_next_slot = time_until_next_slot(slot_duration.as_duration(), self.drift); + tokio::time::sleep(time_until_next_slot).await; + let timestamp = sp_timestamp::Timestamp::current(); + Ok(SlotInfo { + slot: Slot::from_timestamp(timestamp, slot_duration), + timestamp, + slot_duration, + }) + } +} + +/// Run block-builder. 
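The `slot_drift` handling above is plain clock arithmetic: the drift is subtracted from the current time before the next slot boundary is computed, keeping the parachain's view of "now" slightly behind the relay chain. A standalone worked example of `time_until_next_slot`, copying the arithmetic from the hunk above but taking `now` as a parameter so the example is deterministic (the real code reads the system clock):

use std::time::Duration;

// Same arithmetic as `time_until_next_slot` above, with `now` passed in explicitly.
fn time_until_next_slot(now: Duration, slot_duration: Duration, drift: Duration) -> Duration {
    let now = now.as_millis() - drift.as_millis();
    let next_slot = (now + slot_duration.as_millis()) / slot_duration.as_millis();
    let remaining_millis = next_slot * slot_duration.as_millis() - now;
    Duration::from_millis(remaining_millis as u64)
}

fn main() {
    let slot = Duration::from_secs(6);
    // 1s drift: at t = 12.5s wall clock the shifted clock reads 11.5s, so the next
    // 6s boundary is at 12s shifted time and the wait is 0.5s instead of 5.5s.
    assert_eq!(
        time_until_next_slot(Duration::from_millis(12_500), slot, Duration::from_secs(1)),
        Duration::from_millis(500)
    );
    // Without drift the same instant is 0.5s past a boundary, so the wait is 5.5s.
    assert_eq!(
        time_until_next_slot(Duration::from_millis(12_500), slot, Duration::ZERO),
        Duration::from_millis(5_500)
    );
}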
+pub fn run_block_builder( + params: BuilderTaskParams, +) -> impl Future + Send + 'static +where + Block: BlockT, + Client: ProvideRuntimeApi + + UsageProvider + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RelayClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + async move { + tracing::info!(target: LOG_TARGET, "Starting slot-based block-builder task."); + let BuilderTaskParams { + relay_client, + create_inherent_data_providers, + para_client, + keystore, + block_import, + para_id, + proposer, + collator_service, + collator_sender, + code_hash_provider, + authoring_duration, + para_backend, + relay_chain_slot_duration, + slot_drift, + } = params; + + let slot_timer = SlotTimer::<_, _, P>::new_with_drift(para_client.clone(), slot_drift); + + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers, + block_import, + relay_client: relay_client.clone(), + keystore: keystore.clone(), + para_id, + proposer, + collator_service, + }; + + collator_util::Collator::::new(params) + }; + + let mut relay_chain_fetcher = RelayChainCachingFetcher::new(relay_client.clone(), para_id); + + loop { + // We wait here until the next slot arrives. + let Ok(para_slot) = slot_timer.wait_until_next_slot().await else { + return; + }; + + let Some(expected_cores) = + expected_core_count(relay_chain_slot_duration, para_slot.slot_duration) + else { + return + }; + + let Ok(RelayChainData { + relay_parent_header, + max_pov_size, + relay_parent_hash: relay_parent, + scheduled_cores, + }) = relay_chain_fetcher.get_relay_chain_data().await + else { + continue; + }; + + if scheduled_cores.is_empty() { + tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); + continue; + } + + let core_index_in_scheduled: u64 = *para_slot.slot % expected_cores; + let Some(core_index) = scheduled_cores.get(core_index_in_scheduled as usize) else { + tracing::debug!(target: LOG_TARGET, core_index_in_scheduled, core_len = scheduled_cores.len(), "Para is scheduled, but not enough cores available."); + continue; + }; + + let Some((included_block, parent)) = + crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) + .await + else { + continue + }; + + let parent_header = parent.header; + let parent_hash = parent.hash; + + // We mainly call this to inform users at genesis if there is a mismatch with the + // on-chain data. + collator.collator_service().check_block_status(parent_hash, &parent_header); + + let slot_claim = match crate::collators::can_build_upon::<_, _, P>( + para_slot.slot, + para_slot.timestamp, + parent_hash, + included_block, + &*para_client, + &keystore, + ) + .await + { + Some(slot) => slot, + None => { + tracing::debug!( + target: crate::LOG_TARGET, + ?core_index, + slot_info = ?para_slot, + unincluded_segment_len = parent.depth, + relay_parent = %relay_parent, + included = %included_block, + parent = %parent_hash, + "Not building block." 
+ ); + continue + }, + }; + + tracing::debug!( + target: crate::LOG_TARGET, + ?core_index, + slot_info = ?para_slot, + unincluded_segment_len = parent.depth, + relay_parent = %relay_parent, + included = %included_block, + parent = %parent_hash, + "Building block." + ); + + let validation_data = PersistedValidationData { + parent_head: parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break + }, + Ok(x) => x, + }; + + let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { + None => { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break + }, + Some(v) => v, + }; + + check_validation_code_or_log( + &validation_code_hash, + para_id, + &relay_client, + relay_parent, + ) + .await; + + let Ok(Some(candidate)) = collator + .build_block_and_import( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + continue; + }; + + let new_block_hash = candidate.block.header().hash(); + + // Announce the newly built block to our peers. + collator.collator_service().announce_block(new_block_hash, None); + + if let Err(err) = collator_sender.unbounded_send(CollatorMessage { + relay_parent, + parent_header, + parachain_candidate: candidate, + validation_code_hash, + core_index: *core_index, + }) { + tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); + return + } + } + } +} + +/// Calculate the expected core count based on the slot duration of the relay and parachain. +/// +/// If `slot_duration` is smaller than `relay_chain_slot_duration` that means that we produce more +/// than one parachain block per relay chain block. In order to get these backed, we need multiple +/// cores. This method calculates how many cores we should expect to have scheduled under the +/// assumption that we have a fixed number of cores assigned to our parachain. +fn expected_core_count( + relay_chain_slot_duration: Duration, + slot_duration: SlotDuration, +) -> Option { + let slot_duration_millis = slot_duration.as_millis(); + u64::try_from(relay_chain_slot_duration.as_millis()) + .map_err(|e| tracing::error!("Unable to calculate expected parachain core count: {e}")) + .map(|relay_slot_duration| (relay_slot_duration / slot_duration_millis).max(1)) + .ok() +} + +/// Contains relay chain data necessary for parachain block building. +#[derive(Clone)] +struct RelayChainData { + /// Current relay chain parent header. + pub relay_parent_header: RelayHeader, + /// The cores this para is scheduled on in the context of the relay parent. + pub scheduled_cores: Vec, + /// Maximum configured PoV size on the relay chain. + pub max_pov_size: u32, + /// Current relay chain parent header. 
+ pub relay_parent_hash: RelayHash, +} + +/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block +/// hash. +struct RelayChainCachingFetcher { + relay_client: RI, + para_id: ParaId, + last_data: Option<(RelayHash, RelayChainData)>, +} + +impl RelayChainCachingFetcher +where + RI: RelayChainInterface + Clone + 'static, +{ + pub fn new(relay_client: RI, para_id: ParaId) -> Self { + Self { relay_client, para_id, last_data: None } + } + + /// Fetch required [`RelayChainData`] from the relay chain. + /// If this data has been fetched in the past for the incoming hash, it will reuse + /// cached data. + pub async fn get_relay_chain_data(&mut self) -> Result { + let Ok(relay_parent) = self.relay_client.best_block_hash().await else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block hash."); + return Err(()) + }; + + match &self.last_data { + Some((last_seen_hash, data)) if *last_seen_hash == relay_parent => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); + Ok(data.clone()) + }, + _ => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); + let data = self.update_for_relay_parent(relay_parent).await?; + self.last_data = Some((relay_parent, data.clone())); + Ok(data) + }, + } + } + + /// Fetch fresh data from the relay chain for the given relay parent hash. + async fn update_for_relay_parent(&self, relay_parent: RelayHash) -> Result { + let scheduled_cores = + cores_scheduled_for_para(relay_parent, self.para_id, &self.relay_client).await; + let Ok(Some(relay_parent_header)) = + self.relay_client.header(BlockId::Hash(relay_parent)).await + else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); + return Err(()) + }; + + let max_pov_size = match self + .relay_client + .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(None) => return Err(()), + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + return Err(()) + }, + }; + + Ok(RelayChainData { + relay_parent_hash: relay_parent, + relay_parent_header, + scheduled_cores, + max_pov_size, + }) + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b8151f6302c411469a3258135de2618fc6f5d48 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -0,0 +1,140 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
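Aside: the core-selection math in the block-builder loop above (`expected_core_count` and the `slot % expected_cores` indexing) is easiest to see with concrete numbers. The function names and integer types below are illustrative, not the crate's API.

```rust
// Sketch of the core-count and core-selection arithmetic used by the
// block-builder task above; names and integer types are made up here.
fn expected_core_count(relay_slot_ms: u64, para_slot_ms: u64) -> u64 {
    // One core per parachain block expected per relay-chain block, at least one.
    (relay_slot_ms / para_slot_ms).max(1)
}

fn core_index_for_slot(para_slot: u64, expected_cores: u64, scheduled: &[u32]) -> Option<u32> {
    // The parachain slot, modulo the expected core count, picks an entry in the
    // list of cores currently scheduled for this para.
    scheduled.get((para_slot % expected_cores) as usize).copied()
}

fn main() {
    // 6s relay slots and 2s parachain slots: three blocks per relay block,
    // so three cores are expected.
    let cores = expected_core_count(6_000, 2_000);
    assert_eq!(cores, 3);

    // With cores 0, 1 and 4 scheduled, consecutive parachain slots rotate
    // through them; slot 7 maps to index 7 % 3 == 1, i.e. core 1.
    let scheduled = [0u32, 1, 4];
    assert_eq!(core_index_for_slot(7, cores, &scheduled), Some(1));

    // If fewer cores are scheduled than expected, some slots find no core and
    // the real task skips block production for that slot.
    assert_eq!(core_index_for_slot(2, cores, &[0u32]), None);
}
```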
+ +use codec::Encode; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{MaybeCompressedPoV, SubmitCollationParams}; +use polkadot_node_subsystem::messages::CollationGenerationMessage; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{CollatorPair, Id as ParaId}; + +use futures::prelude::*; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_runtime::traits::{Block as BlockT, Header}; + +use super::CollatorMessage; + +const LOG_TARGET: &str = "aura::cumulus::collation_task"; + +/// Parameters for the collation task. +pub struct Params { + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, + /// Collator service interface + pub collator_service: CS, + /// Receiver channel for communication with the block builder task. + pub collator_receiver: TracingUnboundedReceiver>, +} + +/// Asynchronously executes the collation task for a parachain. +/// +/// This function initializes the collator subsystems necessary for producing and submitting +/// collations to the relay chain. It listens for new best relay chain block notifications and +/// handles collator messages. If our parachain is scheduled on a core and we have a candidate, +/// the task will build a collation and send it to the relay chain. +pub async fn run_collation_task(mut params: Params) +where + Block: BlockT, + CS: CollatorServiceInterface + Send + Sync + 'static, + RClient: RelayChainInterface + Clone + 'static, +{ + let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else { + tracing::error!(target: LOG_TARGET, "Failed to get overseer handle."); + return + }; + + cumulus_client_collator::initialize_collator_subsystems( + &mut overseer_handle, + params.collator_key, + params.para_id, + params.reinitialize, + ) + .await; + + let collator_service = params.collator_service; + while let Some(collator_message) = params.collator_receiver.next().await { + handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await; + } +} + +/// Handle an incoming collation message from the block builder task. +/// This builds the collation from the [`CollatorMessage`] and submits it to +/// the collation-generation subsystem of the relay chain. 
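Aside: the PoV-size figures logged by the function below are SCALE-encoded sizes converted to kB. A tiny illustration of that conversion, assuming `parity-scale-codec` (imported as `codec`) with the `derive` feature; `DemoHeader` is a made-up type, not anything from this crate.

```rust
// Illustrative only: how an encoded size becomes the "x.xx kB" figure in the
// PoV-size log below. Requires parity-scale-codec with the `derive` feature.
use codec::Encode;

#[derive(Encode)]
struct DemoHeader {
    number: u32,
    parent: [u8; 32],
}

fn main() {
    let header = DemoHeader { number: 7, parent: [0u8; 32] };
    // `encoded_size` matches the length of the SCALE-encoded bytes.
    assert_eq!(header.encoded_size(), header.encode().len());
    println!("header: {:.2}kB", header.encoded_size() as f64 / 1024f64);
}
```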
+async fn handle_collation_message( + message: CollatorMessage, + collator_service: &impl CollatorServiceInterface, + overseer_handle: &mut OverseerHandle, +) { + let CollatorMessage { + parent_header, + parachain_candidate, + validation_code_hash, + relay_parent, + core_index, + } = message; + + let hash = parachain_candidate.block.header().hash(); + let number = *parachain_candidate.block.header().number(); + let (collation, block_data) = + match collator_service.build_collation(&parent_header, hash, parachain_candidate) { + Some(collation) => collation, + None => { + tracing::warn!(target: LOG_TARGET, %hash, ?number, ?core_index, "Unable to build collation."); + return; + }, + }; + + tracing::info!( + target: LOG_TARGET, + "PoV size {{ header: {:.2}kB, extrinsics: {:.2}kB, storage_proof: {:.2}kB }}", + block_data.header().encoded_size() as f64 / 1024f64, + block_data.extrinsics().encoded_size() as f64 / 1024f64, + block_data.storage_proof().encoded_size() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + tracing::debug!(target: LOG_TARGET, ?core_index, %hash, %number, "Submitting collation for core."); + overseer_handle + .send_msg( + CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation, + parent_head: parent_header.encode().into(), + validation_code_hash, + core_index, + result_sender: None, + }), + "SubmitCollation", + ) + .await; +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..0fe49d58d25be672d8541b486c0aa8f22d825c30 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! The block building mechanism consists of two parts: +//! 1. A block-builder task that builds parachain blocks at each of our slots. +//! 2. A collator task that transforms the blocks into a collation and submits them to the relay +//! chain. +//! +//! Blocks are built on every parachain slot if there is a core scheduled on the relay chain. At the +//! beginning of each block building loop, we determine how many blocks we expect to build per relay +//! chain block. The collator implementation then expects that we have that many cores scheduled +//! during the relay chain block. After the block is built, the block builder task sends it to +//! the collation task which compresses it and submits it to the collation-generation subsystem. 
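Aside: as a rough mental model of that two-task split, a producer/consumer pair over a `futures` unbounded channel looks like the sketch below. The names (`BuiltBlock`, `block_builder_task`, `collation_task`) are illustrative; the real module wires the tasks with `sc_utils::mpsc::tracing_unbounded` and the `CollatorMessage` type defined further down.

```rust
// Rough sketch of the two-task layout: a block-builder loop feeding a
// collation loop through an unbounded channel.
use futures::{channel::mpsc, future::join, StreamExt};

#[derive(Debug)]
struct BuiltBlock(u64);

async fn block_builder_task(tx: mpsc::UnboundedSender<BuiltBlock>) {
    for slot in 0..3u64 {
        // In the real task a block is authored for the slot before sending.
        if tx.unbounded_send(BuiltBlock(slot)).is_err() {
            // The collation task is gone; stop building.
            break;
        }
    }
    // Dropping `tx` closes the channel and ends the consumer loop.
}

async fn collation_task(mut rx: mpsc::UnboundedReceiver<BuiltBlock>) {
    while let Some(block) = rx.next().await {
        // In the real task the block becomes a collation and is submitted to
        // the relay chain via the overseer handle.
        println!("submitting collation for {:?}", block);
    }
}

fn main() {
    let (tx, rx) = mpsc::unbounded();
    // The embedding node is expected to spawn both futures; here we simply
    // drive them to completion on a local executor.
    futures::executor::block_on(join(block_builder_task(tx), collation_task(rx)));
}
```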
+ +use codec::Codec; +use consensus_common::ParachainCandidate; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::CollectCollationInfo; +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_primitives::{ + CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, +}; + +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; +use sc_utils::mpsc::tracing_unbounded; + +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::AuraApi; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Member}; + +use std::{sync::Arc, time::Duration}; + +use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; + +mod block_builder_task; +mod collation_task; + +/// Parameters for [`run`]. +pub struct Params { + /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// The length of slots in the relay chain. + pub relay_chain_slot_duration: Duration, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, + /// Drift slots by a fixed duration. This can be used to create more preferrable authoring + /// timings. + pub slot_drift: Duration, +} + +/// Run aura-based block building and collation task. 
+pub fn run( + params: Params, +) -> (impl futures::Future, impl futures::Future) +where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + UsageProvider + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair + 'static, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100); + let collator_task_params = collation_task::Params { + relay_client: params.relay_client.clone(), + collator_key: params.collator_key, + para_id: params.para_id, + reinitialize: params.reinitialize, + collator_service: params.collator_service.clone(), + collator_receiver: rx, + }; + + let collation_task_fut = run_collation_task::(collator_task_params); + + let block_builder_params = block_builder_task::BuilderTaskParams { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + para_client: params.para_client, + para_backend: params.para_backend, + relay_client: params.relay_client, + code_hash_provider: params.code_hash_provider, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + authoring_duration: params.authoring_duration, + collator_sender: tx, + relay_chain_slot_duration: params.relay_chain_slot_duration, + slot_drift: params.slot_drift, + }; + + let block_builder_fut = + run_block_builder::(block_builder_params); + + (collation_task_fut, block_builder_fut) +} + +/// Message to be sent from the block builder to the collation task. +/// +/// Contains all data necessary to submit a collation to the relay chain. +struct CollatorMessage { + /// The hash of the relay chain block that provides the context for the parachain block. + pub relay_parent: RelayHash, + /// The header of the parent block. + pub parent_header: Block::Header, + /// The parachain block candidate. + pub parachain_candidate: ParachainCandidate, + /// The validation code hash at the parent block. + pub validation_code_hash: ValidationCodeHash, + /// Core index that this block should be submitted on + pub core_index: CoreIndex, +} diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index be554bdcfc79b986f11da24404a47556031657b2..68f2d37c8748863be879134d3fd0849adf5efb11 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -21,6 +21,7 @@ /// should be thrown out and which ones should be kept. 
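Aside: the equivocation-import-queue hunk below wraps the defender in a `parking_lot::Mutex` because `Verifier::verify` now takes `&self` instead of `&mut self`. A minimal sketch of that interior-mutability pattern, with `Defender` and `check` as made-up stand-ins for `NaiveEquivocationDefender::insert_and_check`:

```rust
// Sketch: a `&self` method mutating state through a Mutex, mirroring the
// change made to the verifier below. Requires the parking_lot crate.
use parking_lot::Mutex;

#[derive(Default)]
struct Defender {
    seen: Vec<u64>,
}

impl Defender {
    // Needs `&mut self`: it records every slot it is asked about.
    fn check(&mut self, slot: u64) -> bool {
        self.seen.push(slot);
        self.seen.iter().filter(|s| **s == slot).count() > 3
    }
}

struct Verifier {
    // The Mutex lets a method taking `&self` still mutate the defender and
    // keeps the verifier usable behind a shared reference across threads.
    defender: Mutex<Defender>,
}

impl Verifier {
    fn verify(&self, slot: u64) -> Result<(), String> {
        if self.defender.lock().check(slot) {
            return Err(format!("too many blocks for slot {slot}"));
        }
        Ok(())
    }
}

fn main() {
    let verifier = Verifier { defender: Mutex::new(Defender::default()) };
    for _ in 0..4 {
        let _ = verifier.verify(7);
    }
    // Repeated blocks at the same slot eventually trip the defender.
    assert!(verifier.verify(7).is_err());
}
```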
use codec::Codec; use cumulus_client_consensus_common::ParachainBlockImportMarker; +use parking_lot::Mutex; use schnellru::{ByLength, LruMap}; use sc_consensus::{ @@ -70,7 +71,7 @@ impl NaiveEquivocationDefender { struct Verifier { client: Arc, create_inherent_data_providers: CIDP, - defender: NaiveEquivocationDefender, + defender: Mutex, telemetry: Option, _phantom: std::marker::PhantomData (Block, P)>, } @@ -88,7 +89,7 @@ where CIDP: CreateInherentDataProviders, { async fn verify( - &mut self, + &self, mut block_params: BlockImportParams, ) -> Result, String> { // Skip checks that include execution, if being told so, or when importing only state. @@ -137,7 +138,7 @@ where block_params.post_hash = Some(post_hash); // Check for and reject egregious amounts of equivocations. - if self.defender.insert_and_check(slot) { + if self.defender.lock().insert_and_check(slot) { return Err(format!( "Rejecting block {:?} due to excessive equivocations at slot", post_hash, @@ -243,7 +244,7 @@ where let verifier = Verifier:: { client, create_inherent_data_providers, - defender: NaiveEquivocationDefender::default(), + defender: Mutex::new(NaiveEquivocationDefender::default()), telemetry, _phantom: std::marker::PhantomData, }; diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 09c2f58d45e4e04ca420d6280c39eed12e39ad4f..4bc2f1d1e600e5f82faaf7cfa84a3b831cf085b7 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -10,42 +10,42 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -dyn-clone = "1.0.16" -futures = "0.3.28" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +dyn-clone = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -tracing = "0.1.37" +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-trie = { path = "../../../../substrate/primitives/trie" } -sp-version = { path = "../../../../substrate/primitives/version" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, 
default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } -cumulus-client-pov-recovery = { path = "../../pov-recovery" } -schnellru = "0.2.1" +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +schnellru = { workspace = true } [dev-dependencies] -futures-timer = "3.0.2" +futures-timer = { workspace = true } # Substrate -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } # Cumulus -cumulus-test-client = { path = "../../../test/client" } -cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder" } +cumulus-test-client = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/common/src/import_queue.rs b/cumulus/client/consensus/common/src/import_queue.rs index 8024b7695a285a414c89cea74561ece91dfd48de..488693604fefccbe2c9b37a22c8ee3c4809383f2 100644 --- a/cumulus/client/consensus/common/src/import_queue.rs +++ b/cumulus/client/consensus/common/src/import_queue.rs @@ -50,7 +50,7 @@ pub struct VerifyNothing; #[async_trait::async_trait] impl Verifier for VerifyNothing { async fn verify( - &mut self, + &self, params: BlockImportParams, ) -> Result, String> { Ok(params) diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index cebe34e7ea58828372a9261e3be94866e119546a..e12750dcc553f9983f3432c9bf2ad156318a4cc8 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -19,16 +19,13 @@ use polkadot_primitives::{ Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData, ValidationCodeHash, }; -use cumulus_primitives_core::{ - relay_chain::{self, BlockId as RBlockId, OccupiedCoreAssumption}, - AbridgedHostConfiguration, ParaId, -}; +use cumulus_primitives_core::{relay_chain, AbridgedHostConfiguration}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; -use sc_client_api::{Backend, HeaderBackend}; +use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; -use sp_blockchain::Backend as BlockchainBackend; use sp_consensus_slots::Slot; + use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_timestamp::Timestamp; @@ -36,9 +33,12 @@ use std::{sync::Arc, time::Duration}; mod level_monitor; mod parachain_consensus; +mod parent_search; #[cfg(test)] mod tests; +pub use parent_search::*; + pub use parachain_consensus::run_parachain_consensus; use level_monitor::LevelMonitor; @@ -172,13 +172,13 @@ impl Clone for ParachainBlockImport { impl BlockImport for ParachainBlockImport where Block: BlockT, - BI: BlockImport + Send, + BI: BlockImport + Send + Sync, BE: Backend, { type Error = BI::Error; async fn check_block( - &mut self, + &self, block: sc_consensus::BlockCheckParams, ) -> Result { 
self.inner.check_block(block).await @@ -229,196 +229,6 @@ pub trait ParachainBlockImportMarker {} impl ParachainBlockImportMarker for ParachainBlockImport {} -/// Parameters when searching for suitable parents to build on top of. -#[derive(Debug)] -pub struct ParentSearchParams { - /// The relay-parent that is intended to be used. - pub relay_parent: PHash, - /// The ID of the parachain. - pub para_id: ParaId, - /// A limitation on the age of relay parents for parachain blocks that are being - /// considered. This is relative to the `relay_parent` number. - pub ancestry_lookback: usize, - /// How "deep" parents can be relative to the included parachain block at the relay-parent. - /// The included block has depth 0. - pub max_depth: usize, - /// Whether to only ignore "alternative" branches, i.e. branches of the chain - /// which do not contain the block pending availability. - pub ignore_alternative_branches: bool, -} - -/// A potential parent block returned from [`find_potential_parents`] -#[derive(Debug, PartialEq)] -pub struct PotentialParent { - /// The hash of the block. - pub hash: B::Hash, - /// The header of the block. - pub header: B::Header, - /// The depth of the block. - pub depth: usize, - /// Whether the block is the included block, is itself pending on-chain, or descends - /// from the block pending availability. - pub aligned_with_pending: bool, -} - -/// Perform a recursive search through blocks to find potential -/// parent blocks for a new block. -/// -/// This accepts a relay-chain block to be used as an anchor and a maximum search depth, -/// along with some arguments for filtering parachain blocks and performs a recursive search -/// for parachain blocks. The search begins at the last included parachain block and returns -/// a set of [`PotentialParent`]s which could be potential parents of a new block with this -/// relay-parent according to the search parameters. -/// -/// A parachain block is a potential parent if it is either the last included parachain block, the -/// pending parachain block (when `max_depth` >= 1), or all of the following hold: -/// * its parent is a potential parent -/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent. -/// * its relay-parent is within the same session as the targeted relay-parent. -/// * the block number is within `max_depth` blocks of the included block -pub async fn find_potential_parents( - params: ParentSearchParams, - client: &impl Backend, - relay_client: &impl RelayChainInterface, -) -> Result>, RelayChainError> { - // 1. Build up the ancestry record of the relay chain to compare against. - let rp_ancestry = { - let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1); - let mut current_rp = params.relay_parent; - let mut required_session = None; - - while ancestry.len() <= params.ancestry_lookback { - let header = match relay_client.header(RBlockId::hash(current_rp)).await? { - None => break, - Some(h) => h, - }; - - let session = relay_client.session_index_for_child(current_rp).await?; - if let Some(required_session) = required_session { - // Respect the relay-chain rule not to cross session boundaries. - if session != required_session { - break - } - } else { - required_session = Some(session); - } - - ancestry.push((current_rp, *header.state_root())); - current_rp = *header.parent_hash(); - - // don't iterate back into the genesis block. 
- if header.number == 1 { - break - } - } - - ancestry - }; - - let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); - let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); - - // 2. Get the included and pending availability blocks. - let included_header = relay_client - .persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::TimedOut, - ) - .await?; - - let included_header = match included_header { - Some(pvd) => pvd.parent_head, - None => return Ok(Vec::new()), // this implies the para doesn't exist. - }; - - let pending_header = relay_client - .persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::Included, - ) - .await? - .and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); - - let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { - None => return Ok(Vec::new()), - Some(x) => x, - }; - // Silently swallow if pending block can't decode. - let pending_header = pending_header.and_then(|p| B::Header::decode(&mut &p.0[..]).ok()); - let included_hash = included_header.hash(); - let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); - - let mut frontier = vec![PotentialParent:: { - hash: included_hash, - header: included_header, - depth: 0, - aligned_with_pending: true, - }]; - - // Recursive search through descendants of the included block which have acceptable - // relay parents. - let mut potential_parents = Vec::new(); - while let Some(entry) = frontier.pop() { - let is_pending = - entry.depth == 1 && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); - let is_included = entry.depth == 0; - - // note: even if the pending block or included block have a relay parent - // outside of the expected part of the relay chain, they are always allowed - // because they have already been posted on chain. - let is_potential = is_pending || is_included || { - let digest = entry.header.digest(); - cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) || - cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) - .map(|(r, _n)| r) - .map_or(false, is_root_in_ancestry) - }; - - let parent_aligned_with_pending = entry.aligned_with_pending; - let child_depth = entry.depth + 1; - let hash = entry.hash; - - if is_potential { - potential_parents.push(entry); - } - - if !is_potential || child_depth > params.max_depth { - continue - } - - // push children onto search frontier. - for child in client.blockchain().children(hash).ok().into_iter().flatten() { - let aligned_with_pending = parent_aligned_with_pending && - if child_depth == 1 { - pending_hash.as_ref().map_or(true, |h| &child == h) - } else { - true - }; - - if params.ignore_alternative_branches && !aligned_with_pending { - continue - } - - let header = match client.blockchain().header(child) { - Ok(Some(h)) => h, - Ok(None) => continue, - Err(_) => continue, - }; - - frontier.push(PotentialParent { - hash: child, - header, - depth: child_depth, - aligned_with_pending, - }); - } - } - - Ok(potential_parents) -} - /// Get the relay-parent slot and timestamp from a header. 
pub fn relay_slot_and_timestamp( relay_parent_header: &PHeader, diff --git a/cumulus/client/consensus/common/src/parachain_consensus.rs b/cumulus/client/consensus/common/src/parachain_consensus.rs index b4b315bb32be6ea18d7ae9399cafe4640096f2b4..944917673b119732a587adf3596ae59b829e30b5 100644 --- a/cumulus/client/consensus/common/src/parachain_consensus.rs +++ b/cumulus/client/consensus/common/src/parachain_consensus.rs @@ -375,60 +375,61 @@ async fn handle_new_best_parachain_head( target: LOG_TARGET, block_hash = ?hash, "Skipping set new best block, because block is already the best.", - ) - } else { - // Make sure the block is already known or otherwise we skip setting new best. - match parachain.block_status(hash) { - Ok(BlockStatus::InChainWithState) => { - unset_best_header.take(); - tracing::debug!( - target: LOG_TARGET, - ?hash, - "Importing block as new best for parachain.", - ); - import_block_as_new_best(hash, parachain_head, parachain).await; - }, - Ok(BlockStatus::InChainPruned) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - "Trying to set pruned block as new best!", - ); - }, - Ok(BlockStatus::Unknown) => { - *unset_best_header = Some(parachain_head); + ); + return; + } - tracing::debug!( - target: LOG_TARGET, - block_hash = ?hash, - "Parachain block not yet imported, waiting for import to enact as best block.", - ); - - if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { - // Best effort channel to actively encourage block recovery. - // An error here is not fatal; the relay chain continuously re-announces - // the best block, thus we will have other opportunities to retry. - let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; - if let Err(err) = recovery_chan_tx.try_send(req) { - tracing::warn!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?err, - "Unable to notify block recovery subsystem" - ) - } + // Make sure the block is already known or otherwise we skip setting new best. + match parachain.block_status(hash) { + Ok(BlockStatus::InChainWithState) => { + unset_best_header.take(); + tracing::debug!( + target: LOG_TARGET, + included = ?hash, + "Importing block as new best for parachain.", + ); + import_block_as_new_best(hash, parachain_head, parachain).await; + }, + Ok(BlockStatus::InChainPruned) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + "Trying to set pruned block as new best!", + ); + }, + Ok(BlockStatus::Unknown) => { + *unset_best_header = Some(parachain_head); + + tracing::debug!( + target: LOG_TARGET, + block_hash = ?hash, + "Parachain block not yet imported, waiting for import to enact as best block.", + ); + + if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { + // Best effort channel to actively encourage block recovery. + // An error here is not fatal; the relay chain continuously re-announces + // the best block, thus we will have other opportunities to retry. 
+ let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; + if let Err(err) = recovery_chan_tx.try_send(req) { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?err, + "Unable to notify block recovery subsystem" + ) } - }, - Err(e) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?e, - "Failed to get block status of block.", - ); - }, - _ => {}, - } + } + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?e, + "Failed to get block status of block.", + ); + }, + _ => {}, } } diff --git a/cumulus/client/consensus/common/src/parent_search.rs b/cumulus/client/consensus/common/src/parent_search.rs new file mode 100644 index 0000000000000000000000000000000000000000..c371ec62f8455cacc7d6a2d7b1ba71e142661fff --- /dev/null +++ b/cumulus/client/consensus/common/src/parent_search.rs @@ -0,0 +1,418 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use codec::Decode; +use polkadot_primitives::Hash as RelayHash; + +use cumulus_primitives_core::{ + relay_chain::{BlockId as RBlockId, OccupiedCoreAssumption}, + ParaId, +}; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; + +use sc_client_api::{Backend, HeaderBackend}; + +use sp_blockchain::{Backend as BlockchainBackend, TreeRoute}; + +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +const PARENT_SEARCH_LOG_TARGET: &str = "consensus::common::find_potential_parents"; + +/// Parameters when searching for suitable parents to build on top of. +#[derive(Debug)] +pub struct ParentSearchParams { + /// The relay-parent that is intended to be used. + pub relay_parent: RelayHash, + /// The ID of the parachain. + pub para_id: ParaId, + /// A limitation on the age of relay parents for parachain blocks that are being + /// considered. This is relative to the `relay_parent` number. + pub ancestry_lookback: usize, + /// How "deep" parents can be relative to the included parachain block at the relay-parent. + /// The included block has depth 0. + pub max_depth: usize, + /// Whether to only ignore "alternative" branches, i.e. branches of the chain + /// which do not contain the block pending availability. + pub ignore_alternative_branches: bool, +} + +/// A potential parent block returned from [`find_potential_parents`] +#[derive(PartialEq)] +pub struct PotentialParent { + /// The hash of the block. + pub hash: B::Hash, + /// The header of the block. + pub header: B::Header, + /// The depth of the block with respect to the included block. + pub depth: usize, + /// Whether the block is the included block, is itself pending on-chain, or descends + /// from the block pending availability. 
+ pub aligned_with_pending: bool, +} + +impl std::fmt::Debug for PotentialParent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PotentialParent") + .field("hash", &self.hash) + .field("depth", &self.depth) + .field("aligned_with_pending", &self.aligned_with_pending) + .field("number", &self.header.number()) + .finish() + } +} + +/// Perform a recursive search through blocks to find potential +/// parent blocks for a new block. +/// +/// This accepts a relay-chain block to be used as an anchor and a maximum search depth, +/// along with some arguments for filtering parachain blocks and performs a recursive search +/// for parachain blocks. The search begins at the last included parachain block and returns +/// a set of [`PotentialParent`]s which could be potential parents of a new block with this +/// relay-parent according to the search parameters. +/// +/// A parachain block is a potential parent if it is either the last included parachain block, the +/// pending parachain block (when `max_depth` >= 1), or all of the following hold: +/// * its parent is a potential parent +/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent. +/// * its relay-parent is within the same session as the targeted relay-parent. +/// * the block number is within `max_depth` blocks of the included block +pub async fn find_potential_parents( + params: ParentSearchParams, + backend: &impl Backend, + relay_client: &impl RelayChainInterface, +) -> Result>, RelayChainError> { + tracing::trace!("Parent search parameters: {params:?}"); + // Get the included block. + let Some((included_header, included_hash)) = + fetch_included_from_relay_chain(relay_client, backend, params.para_id, params.relay_parent) + .await? + else { + return Ok(Default::default()) + }; + + let only_included = vec![PotentialParent { + hash: included_hash, + header: included_header.clone(), + depth: 0, + aligned_with_pending: true, + }]; + + if params.max_depth == 0 { + return Ok(only_included) + }; + + // Pending header and hash. + let maybe_pending = { + // Fetch the most recent pending header from the relay chain. We use + // `OccupiedCoreAssumption::Included` so the candidate pending availability gets enacted + // before being returned to us. + let pending_header = relay_client + .persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ) + .await? + .and_then(|p| B::Header::decode(&mut &p.parent_head.0[..]).ok()) + .filter(|x| x.hash() != included_hash); + + // If the pending block is not locally known, we can't do anything. + if let Some(header) = pending_header { + let pending_hash = header.hash(); + match backend.blockchain().header(pending_hash) { + // We are supposed to ignore branches that don't contain the pending block, but we + // do not know the pending block locally. + Ok(None) | Err(_) if params.ignore_alternative_branches => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %pending_hash, + "Failed to get header for pending block.", + ); + return Ok(Default::default()) + }, + Ok(Some(_)) => Some((header, pending_hash)), + _ => None, + } + } else { + None + } + }; + + let maybe_route_to_last_pending = maybe_pending + .as_ref() + .map(|(_, pending)| { + sp_blockchain::tree_route(backend.blockchain(), included_hash, *pending) + }) + .transpose()?; + + // If we want to ignore alternative branches there is no reason to start + // the parent search at the included block. 
We can add the included block and + // the path to the pending block to the potential parents directly (limited by max_depth). + let (frontier, potential_parents) = match ( + &maybe_pending, + params.ignore_alternative_branches, + &maybe_route_to_last_pending, + ) { + (Some((pending_header, pending_hash)), true, Some(ref route_to_pending)) => { + let mut potential_parents = only_included; + + // This is a defensive check, should never happen. + if !route_to_pending.retracted().is_empty() { + tracing::warn!(target: PARENT_SEARCH_LOG_TARGET, "Included block not an ancestor of pending block. This should not happen."); + return Ok(Default::default()) + } + + // Add all items on the path included -> pending - 1 to the potential parents, but + // not more than `max_depth`. + let num_parents_on_path = + route_to_pending.enacted().len().saturating_sub(1).min(params.max_depth); + for (num, block) in + route_to_pending.enacted().iter().take(num_parents_on_path).enumerate() + { + let Ok(Some(header)) = backend.blockchain().header(block.hash) else { continue }; + + potential_parents.push(PotentialParent { + hash: block.hash, + header, + depth: 1 + num, + aligned_with_pending: true, + }); + } + + // The search for additional potential parents should now start at the children of + // the pending block. + ( + vec![PotentialParent { + hash: *pending_hash, + header: pending_header.clone(), + depth: route_to_pending.enacted().len(), + aligned_with_pending: true, + }], + potential_parents, + ) + }, + _ => (only_included, Default::default()), + }; + + if potential_parents.len() > params.max_depth { + return Ok(potential_parents); + } + + // Build up the ancestry record of the relay chain to compare against. + let rp_ancestry = + build_relay_parent_ancestry(params.ancestry_lookback, params.relay_parent, relay_client) + .await?; + + Ok(search_child_branches_for_parents( + frontier, + maybe_route_to_last_pending, + included_header, + maybe_pending.map(|(_, hash)| hash), + backend, + params.max_depth, + params.ignore_alternative_branches, + rp_ancestry, + potential_parents, + )) +} + +/// Fetch the included block from the relay chain. +async fn fetch_included_from_relay_chain( + relay_client: &impl RelayChainInterface, + backend: &impl Backend, + para_id: ParaId, + relay_parent: RelayHash, +) -> Result, RelayChainError> { + // Fetch the pending header from the relay chain. We use `OccupiedCoreAssumption::TimedOut` + // so that even if there is a pending candidate, we assume it is timed out and we get the + // included head. + let included_header = relay_client + .persisted_validation_data(relay_parent, para_id, OccupiedCoreAssumption::TimedOut) + .await?; + let included_header = match included_header { + Some(pvd) => pvd.parent_head, + None => return Ok(None), // this implies the para doesn't exist. + }; + + let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { + None => return Ok(None), + Some(x) => x, + }; + + let included_hash = included_header.hash(); + // If the included block is not locally known, we can't do anything. 
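Aside: the search that `find_potential_parents` delegates to (`search_child_branches_for_parents`, further below) is essentially a depth-limited walk over child blocks starting at the included block. The sketch below strips out the relay-parent ancestry and pending-alignment checks and uses string hashes and a plain map, purely to illustrate the frontier and `max_depth` mechanics.

```rust
// Much-simplified frontier walk: start from the included block, visit children
// up to `max_depth`, and collect every visited block as a potential parent.
// In the real search, a block is only kept if it is the included or pending
// block or its relay parent lies in the allowed relay-chain ancestry.
use std::collections::HashMap;

fn potential_parents(
    included: &str,
    children: &HashMap<&str, Vec<&str>>,
    max_depth: usize,
) -> Vec<(String, usize)> {
    let mut frontier = vec![(included.to_string(), 0usize)];
    let mut found = Vec::new();

    while let Some((hash, depth)) = frontier.pop() {
        found.push((hash.clone(), depth));

        if depth + 1 > max_depth {
            continue;
        }
        // Push children onto the search frontier.
        for child in children.get(hash.as_str()).into_iter().flatten() {
            frontier.push((child.to_string(), depth + 1));
        }
    }

    found
}

fn main() {
    // included -> A -> B, plus a sibling fork included -> C.
    let mut children = HashMap::new();
    children.insert("included", vec!["A", "C"]);
    children.insert("A", vec!["B"]);

    // With max_depth = 1 only the included block and its direct children
    // qualify; "B" at depth 2 is cut off.
    let found = potential_parents("included", &children, 1);
    let names: Vec<_> = found.iter().map(|(h, _)| h.as_str()).collect();
    assert!(names.contains(&"included") && names.contains(&"A") && names.contains(&"C"));
    assert!(!names.contains(&"B"));
    assert_eq!(found.len(), 3);
}
```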
+ match backend.blockchain().header(included_hash) { + Ok(None) => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %included_hash, + "Failed to get header for included block.", + ); + return Ok(None) + }, + Err(e) => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %included_hash, + %e, + "Failed to get header for included block.", + ); + return Ok(None) + }, + _ => {}, + }; + + Ok(Some((included_header, included_hash))) +} + +/// Build an ancestry of relay parents that are acceptable. +/// +/// An acceptable relay parent is one that is no more than `ancestry_lookback` + 1 blocks below the +/// relay parent we want to build on. Parachain blocks anchored on relay parents older than that can +/// not be considered potential parents for block building. They have no chance of still getting +/// included, so our newly build parachain block would also not get included. +/// +/// On success, returns a vector of `(header_hash, state_root)` of the relevant relay chain +/// ancestry blocks. +async fn build_relay_parent_ancestry( + ancestry_lookback: usize, + relay_parent: RelayHash, + relay_client: &impl RelayChainInterface, +) -> Result, RelayChainError> { + let mut ancestry = Vec::with_capacity(ancestry_lookback + 1); + let mut current_rp = relay_parent; + let mut required_session = None; + while ancestry.len() <= ancestry_lookback { + let Some(header) = relay_client.header(RBlockId::hash(current_rp)).await? else { break }; + + let session = relay_client.session_index_for_child(current_rp).await?; + if required_session.get_or_insert(session) != &session { + // Respect the relay-chain rule not to cross session boundaries. + break; + } + + ancestry.push((current_rp, *header.state_root())); + current_rp = *header.parent_hash(); + + // don't iterate back into the genesis block. + if header.number == 1 { + break + } + } + Ok(ancestry) +} + +/// Start search for child blocks that can be used as parents. +pub fn search_child_branches_for_parents( + mut frontier: Vec>, + maybe_route_to_last_pending: Option>, + included_header: Block::Header, + pending_hash: Option, + backend: &impl Backend, + max_depth: usize, + ignore_alternative_branches: bool, + rp_ancestry: Vec<(RelayHash, RelayHash)>, + mut potential_parents: Vec>, +) -> Vec> { + let included_hash = included_header.hash(); + let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); + let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); + + // The distance between pending and included block. Is later used to check if a child + // is aligned with pending when it is between pending and included block. + let pending_distance = maybe_route_to_last_pending.as_ref().map(|route| route.enacted().len()); + + // If a block is on the path included -> pending, we consider it `aligned_with_pending`. + let is_child_pending = |hash| { + maybe_route_to_last_pending + .as_ref() + .map_or(true, |route| route.enacted().iter().any(|x| x.hash == hash)) + }; + + tracing::trace!( + target: PARENT_SEARCH_LOG_TARGET, + ?included_hash, + included_num = ?included_header.number(), + ?pending_hash , + ?rp_ancestry, + "Searching relay chain ancestry." 
+ ); + while let Some(entry) = frontier.pop() { + let is_pending = pending_hash.as_ref().map_or(false, |h| &entry.hash == h); + let is_included = included_hash == entry.hash; + + // note: even if the pending block or included block have a relay parent + // outside of the expected part of the relay chain, they are always allowed + // because they have already been posted on chain. + let is_potential = is_pending || is_included || { + let digest = entry.header.digest(); + let is_hash_in_ancestry_check = cumulus_primitives_core::extract_relay_parent(digest) + .map_or(false, is_hash_in_ancestry); + let is_root_in_ancestry_check = + cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) + .map(|(r, _n)| r) + .map_or(false, is_root_in_ancestry); + + is_hash_in_ancestry_check || is_root_in_ancestry_check + }; + + let parent_aligned_with_pending = entry.aligned_with_pending; + let child_depth = entry.depth + 1; + let hash = entry.hash; + + tracing::trace!( + target: PARENT_SEARCH_LOG_TARGET, + ?hash, + is_potential, + is_pending, + is_included, + "Checking potential parent." + ); + + if is_potential { + potential_parents.push(entry); + } + + if !is_potential || child_depth > max_depth { + continue + } + + // push children onto search frontier. + for child in backend.blockchain().children(hash).ok().into_iter().flatten() { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, child_depth, ?pending_distance, "Looking at child."); + + let aligned_with_pending = parent_aligned_with_pending && + (pending_distance.map_or(true, |dist| child_depth > dist) || + is_child_pending(child)); + + if ignore_alternative_branches && !aligned_with_pending { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, "Child is not aligned with pending block."); + continue + } + + let Ok(Some(header)) = backend.blockchain().header(child) else { continue }; + + frontier.push(PotentialParent { + hash: child, + header, + depth: child_depth, + aligned_with_pending, + }); + } + } + + potential_parents +} diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 2a944bc7f9fa221d63c48678f2899d50251b3655..284fa39ed1e704dd7594861c6f0c3264df75acfb 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -20,7 +20,7 @@ use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; use cumulus_primitives_core::{ - relay_chain::{self, BlockId}, + relay_chain::{BlockId, BlockNumber, CoreState}, CumulusDigestItem, InboundDownwardMessage, InboundHrmpMessage, }; use cumulus_relay_chain_interface::{ @@ -37,6 +37,7 @@ use futures_timer::Delay; use polkadot_primitives::HeadData; use sc_client_api::{Backend as _, UsageProvider}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; +use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::{BlockOrigin, BlockStatus}; use sp_version::RuntimeVersion; use std::{ @@ -46,11 +47,11 @@ use std::{ time::Duration, }; -fn relay_block_num_from_hash(hash: &PHash) -> relay_chain::BlockNumber { +fn relay_block_num_from_hash(hash: &PHash) -> BlockNumber { hash.to_low_u64_be() as u32 } -fn relay_hash_from_block_num(block_number: relay_chain::BlockNumber) -> PHash { +fn relay_hash_from_block_num(block_number: BlockNumber) -> PHash { PHash::from_low_u64_be(block_number as u64) } @@ -257,6 +258,13 @@ impl RelayChainInterface for Relaychain { })) } + async fn availability_cores( + &self, + _relay_parent: PHash, + 
) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + async fn version(&self, _: PHash) -> RelayChainResult { unimplemented!("Not needed for test") } @@ -1138,6 +1146,357 @@ fn find_potential_parents_with_max_depth() { } } +#[test] +fn find_potential_parents_unknown_included() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + + let sproof = sproof_with_best_parent(&client); + let included_but_unknown = build_block(&*client, sproof, None, None, Some(relay_parent)); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), 0); +} + +#[test] +fn find_potential_parents_unknown_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let sproof = sproof_with_parent_by_hash(&client, included_block.header().hash()); + let pending_but_unknown = build_block( + &*client, + sproof, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert!(potential_parents.is_empty()); +} + +#[test] +fn find_potential_parents_unknown_pending_include_alternative_branches() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let alt_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(search_relay_parent), + ); + + tracing::info!(hash = %alt_block.header().hash(), "Alt block."); + let sproof = sproof_with_parent_by_hash(&client, included_block.header().hash()); + let pending_but_unknown = build_block( + &*client, + sproof, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_parents: Vec<_> = vec![&included_block, &alt_block]; + assert_eq!(potential_parents.len(), 2); + assert_eq!(expected_parents[0].hash(), potential_parents[0].hash); + assert_eq!(expected_parents[1].hash(), potential_parents[1].hash); +} + +/// Test where there are multiple pending blocks. +#[test] +fn find_potential_parents_aligned_with_late_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. 
+ let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let in_between_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let pending_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(in_between_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, in_between_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_block.header().clone()); + } + + // Build some blocks on the pending block and on the included block. + // We end up with two sibling chains, one is aligned with the pending block, + // the other is not. + let mut aligned_blocks = Vec::new(); + let mut parent = pending_block.header().hash(); + for _ in 2..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + aligned_blocks.push(block); + } + + let mut alt_blocks = Vec::new(); + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(parent), + None, + Some(search_relay_parent), + ); + parent = block.header().hash(); + alt_blocks.push(block); + } + + // Ignore alternative branch: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), max_depth + 1); + let expected_parents: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + + for i in 0..(max_depth + 1) { + let parent = &potential_parents[i]; + let expected = &expected_parents[i]; + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + assert_eq!(parent.depth, i); + assert!(parent.aligned_with_pending); + } + } + + // Do not ignore: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_len = 2 * max_depth + 1; + assert_eq!(potential_parents.len(), expected_len); + let expected_aligned: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + let expected_alt = alt_blocks.iter().take(max_depth); + + let expected_parents: Vec<_> = + expected_aligned.clone().into_iter().chain(expected_alt).collect(); + // Check correctness. + assert_eq!(expected_parents.len(), expected_len); + + for i in 0..expected_len { + let parent = &potential_parents[i]; + let expected = expected_parents + .iter() + .find(|block| block.header().hash() == parent.hash) + .expect("missing parent"); + + let is_aligned = expected_aligned.contains(&expected); + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + + assert_eq!(parent.aligned_with_pending, is_aligned); + } + } +} + #[test] fn find_potential_parents_aligned_with_pending() { sp_tracing::try_init_simple(); @@ -1249,6 +1608,7 @@ fn find_potential_parents_aligned_with_pending() { // Do not ignore: for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + log::info!("Ran with max_depth = {max_depth}"); let potential_parents = block_on(find_potential_parents( ParentSearchParams { relay_parent: search_relay_parent, @@ -1276,6 +1636,7 @@ fn find_potential_parents_aligned_with_pending() { // Check correctness. assert_eq!(expected_parents.len(), expected_len); + potential_parents.iter().for_each(|p| log::info!("result: {:?}", p)); for i in 0..expected_len { let parent = &potential_parents[i]; let expected = expected_parents @@ -1288,6 +1649,12 @@ fn find_potential_parents_aligned_with_pending() { assert_eq!(parent.hash, expected.hash()); assert_eq!(&parent.header, expected.header()); + log::info!( + "Check hash: {:?} expected: {} is: {}", + parent.hash, + is_aligned, + parent.aligned_with_pending, + ); assert_eq!(parent.aligned_with_pending, is_aligned); } } diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 42ca4e06f8f45e410006304cf1290e811e153b4c..ce91d48bf589a88e5eb0f81507e73da3d28a6b81 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -10,15 +10,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -anyhow = "1.0" -async-trait = "0.1.79" +anyhow = { workspace = true } +async-trait = { workspace = true } thiserror = { workspace = true } # Substrate -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } +sp-consensus = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index 
7c3a901db6c32131e762c3de971606fffddebc80..f3ee6fc2f7d257ff2be86c3d9a0096ea78d30be8 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -10,23 +10,23 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.28" -parking_lot = "0.12.1" -tracing = "0.1.37" +async-trait = { workspace = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../common" } -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/relay-chain/src/import_queue.rs b/cumulus/client/consensus/relay-chain/src/import_queue.rs index 1b521e79d4820fbc4c6709cb177170afa60d3f37..1d6f039da4c123fc79c1132b7b96e93a96c69411 100644 --- a/cumulus/client/consensus/relay-chain/src/import_queue.rs +++ b/cumulus/client/consensus/relay-chain/src/import_queue.rs @@ -52,7 +52,7 @@ where CIDP: CreateInherentDataProviders, { async fn verify( - &mut self, + &self, mut block_params: BlockImportParams, ) -> Result, String> { block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 0dd7c4fdb0f60ac2f70f0fb697c901f960515b9a..bc67678eedeb199d57aad6f35f5be386231b841e 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -10,51 +10,51 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -futures-timer = "3.0.2" -parking_lot = "0.12.1" -tracing = "0.1.37" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } 
+futures = { workspace = true } +futures-timer = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-version = { path = "../../../substrate/primitives/version" } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } # Cumulus -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] -portpicker = "0.1.1" -tokio = { version = "1.32.0", features = ["macros"] } -url = "2.4.0" -rstest = "0.18.2" +portpicker = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +url = { workspace = true } +rstest = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } # Polkadot -polkadot-test-client = { path = "../../../polkadot/node/test/client" } +polkadot-test-client = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } -cumulus-test-service = { path = "../../test/service" } +cumulus-primitives-core = { workspace = true, default-features = true } 
+cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-test-service = { workspace = true } diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index eb0d7f0e01b391279648a5aea6031a275cf409a5..18d121c41d16823b0a8763132f51f9b949c41c6f 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -26,9 +26,10 @@ use futures::{executor::block_on, poll, task::Poll, FutureExt, Stream, StreamExt use parking_lot::Mutex; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CollatorPair, CommittedCandidateReceipt, - Hash as PHash, HeadData, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, SessionIndex, SigningContext, ValidationCodeHash, ValidatorId, + BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorPair, + CommittedCandidateReceipt, CoreState, Hash as PHash, HeadData, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + SigningContext, ValidationCodeHash, ValidatorId, }; use polkadot_test_client::{ Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend, @@ -297,6 +298,13 @@ impl RelayChainInterface for DummyRelayChainInterface { Ok(header) } + async fn availability_cores( + &self, + _relay_parent: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + async fn version(&self, _: PHash) -> RelayChainResult { let version = self.data.lock().runtime_version; diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 85619e8403458c0bfa3dae6dadc688f2cb895731..d81f727b41b9f5382d519c7ea84f625dbd796790 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -7,24 +7,23 @@ description = "Inherent that needs to be present in every parachain block. 
Conta license = "Apache-2.0" [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } -tracing = { version = "0.1.37" } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-trie = { path = "../../../substrate/primitives/trie" } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 539802d6938663e1268e887c17f505d23c72c1c3..a95b24bc2933aa2d8529e70ee2b37759625b1757 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -10,46 +10,46 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -futures-timer = "3.0.2" -rand = "0.8.5" -tracing = "0.1.37" +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +rand = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-version = { path = "../../../substrate/primitives/version" } +sc-client-api = { workspace = 
true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.79" +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +async-trait = { workspace = true } [dev-dependencies] -rstest = "0.18.2" -tokio = { version = "1.32.0", features = ["macros"] } -portpicker = "0.1.1" -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -cumulus-test-client = { path = "../../test/client" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -assert_matches = "1.5" +rstest = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +portpicker = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +cumulus-test-client = { workspace = true } +sc-utils = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +assert_matches = { workspace = true } # Cumulus -cumulus-test-service = { path = "../../test/service" } +cumulus-test-service = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 75bf308ef27aa051e42806b7f65e7ab8b45a9c1a..6f274ed18b6bc7871607cae6c5183d6d861e5117 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -17,7 +17,9 @@ use super::*; use assert_matches::assert_matches; use codec::{Decode, Encode}; -use cumulus_primitives_core::relay_chain::{BlockId, CandidateCommitments, CandidateDescriptor}; +use cumulus_primitives_core::relay_chain::{ + BlockId, CandidateCommitments, CandidateDescriptor, CoreState, +}; use cumulus_relay_chain_interface::{ InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PHash, PHeader, PersistedValidationData, StorageValue, ValidationCodeHash, ValidatorId, @@ -478,6 +480,13 @@ impl RelayChainInterface for Relaychain { async fn header(&self, _: BlockId) -> RelayChainResult> { unimplemented!("Not needed for test"); } + + async 
fn availability_cores( + &self, + _: PHash, + ) -> RelayChainResult>>> { + unimplemented!("Not needed for test"); + } } fn make_candidate_chain(candidate_number_range: Range) -> Vec { diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 7629b6c631a3a0195760eee7fd39e754c289dd3e..6f1b74191be79a3c90100a18df1b20b850e0f4b2 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.28" -futures-timer = "3.0.2" +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } # Polkadot -polkadot-cli = { path = "../../../polkadot/cli", default-features = false, features = ["cli"] } -polkadot-service = { path = "../../../polkadot/node/service" } +polkadot-cli = { features = ["cli"], workspace = true } +polkadot-service = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] # Substrate -sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-keyring = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-test-client = { path = "../../../polkadot/node/test/client" } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-test-client = { workspace = true } +metered = { features = ["futures_channel"], workspace = true } # Cumulus -cumulus-test-service = { path = "../../test/service" } +cumulus-test-service = { workspace = true } diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 
7871623e8447a2645ef772a495d7f698660f7dc5..8f8d666bd1433ff70556e9867edc7861647dbebf 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -19,9 +19,9 @@ use std::{pin::Pin, sync::Arc, time::Duration}; use async_trait::async_trait; use cumulus_primitives_core::{ relay_chain::{ - runtime_api::ParachainHost, Block as PBlock, BlockId, CommittedCandidateReceipt, - Hash as PHash, Header as PHeader, InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, - ValidationCodeHash, ValidatorId, + runtime_api::ParachainHost, Block as PBlock, BlockId, BlockNumber, + CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, InboundHrmpMessage, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -256,6 +256,13 @@ impl RelayChainInterface for RelayChainInProcessInterface { Ok(Box::pin(notifications_stream)) } + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + Ok(self.full_client.runtime_api().availability_cores(relay_parent)?) + } + async fn candidates_pending_availability( &self, hash: PHash, diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index e8603693ac8da957988afac9e98468b759476665..a496fab050dd7fc3cba69c8a6812c5f07b27a6d4 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -10,18 +10,18 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-overseer = { workspace = true, default-features = true } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-core = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sc-client-api = { path = "../../../substrate/client/api" } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-version = { workspace = true } -futures = "0.3.28" -async-trait = "0.1.79" +futures = { workspace = true } +async-trait = { workspace = true } thiserror = { workspace = true } -jsonrpsee-core = "0.22" -codec = { package = "parity-scale-codec", version = "3.6.12" } +jsonrpsee-core = { workspace = true } +codec = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index 46e19b40f010cce4342f54cbe08e975a52fee185..d02035e84e92f45c4da74f91912ee57abaa083ee 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -29,8 +29,8 @@ use sp_api::ApiError; use cumulus_primitives_core::relay_chain::BlockId; pub use cumulus_primitives_core::{ relay_chain::{ - CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + BlockNumber, CommittedCandidateReceipt, CoreState, Hash as 
PHash, Header as PHeader, + InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -217,6 +217,14 @@ pub trait RelayChainInterface: Send + Sync { /// Get the runtime version of the relay chain. async fn version(&self, relay_parent: PHash) -> RelayChainResult; + + /// Yields information on all availability cores as relevant to the child block. + /// + /// Cores are either free, scheduled or occupied. Free cores can have paras assigned to them. + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>>; } #[async_trait] @@ -337,6 +345,13 @@ where .await } + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + (**self).availability_cores(relay_parent).await + } + async fn candidates_pending_availability( &self, block_id: PHash, diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 0b541092a3de866fa68491694db51236ef72f641..95ecadc8bd06ec52b0089dd39143e78e1a27811a 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -11,44 +11,37 @@ workspace = true [dependencies] # polkadot deps -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-core-primitives = { path = "../../../polkadot/core-primitives" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-node-subsystem-util = { path = "../../../polkadot/node/subsystem-util" } -polkadot-node-network-protocol = { path = "../../../polkadot/node/network/protocol" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-availability-recovery = { path = "../../../polkadot/node/network/availability-recovery" } -polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-protocol" } -polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } -polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } -polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } -polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } -polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } -polkadot-service = { path = "../../../polkadot/node/service" } +polkadot-network-bridge = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } # substrate deps -sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-common = { path = "../../../substrate/client/network/common" } -sc-service = { path = "../../../substrate/client/service" } -sc-client-api = { path = "../../../substrate/client/api" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-api = { path = "../../../substrate/primitives/api" } 
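Illustrative sketch (not part of this patch) of how a consumer might call the `availability_cores` method added to `RelayChainInterface` above. The helper name `occupied_core_count` is hypothetical; it assumes the usual `CoreState` variants (`Occupied`, `Scheduled`, `Free`) from `polkadot-primitives`, which this change re-exports from `cumulus-relay-chain-interface`:

use cumulus_relay_chain_interface::{CoreState, PHash, RelayChainInterface, RelayChainResult};

// Hypothetical helper: counts how many relay chain availability cores are
// occupied at the given relay parent, using the new `availability_cores` method.
async fn occupied_core_count(
    relay_chain: &impl RelayChainInterface,
    relay_parent: PHash,
) -> RelayChainResult<usize> {
    let cores = relay_chain.availability_cores(relay_parent).await?;
    Ok(cores.iter().filter(|core| matches!(core, CoreState::Occupied(_))).count())
}

Any code that already holds a `RelayChainInterface` implementation (the in-process interface, the RPC interface, or the test doubles updated in this patch) gains this method through the trait.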
-sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -tokio = { version = "1.32.0", features = ["macros"] } +sc-authority-discovery = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } # cumulus deps -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-relay-chain-rpc-interface = { path = "../relay-chain-rpc-interface" } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-relay-chain-rpc-interface = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } -array-bytes = "6.2.2" -tracing = "0.1.37" -async-trait = "0.1.79" -futures = "0.3.28" -parking_lot = "0.12.1" +array-bytes = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { workspace = true } diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index ea6bc2ede4c041620acc18a2fcccde38b03d2388..6c0730a56a264b9805b092df203671778719bc49 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-overseer = { workspace = true, default-features = true } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-version = { path = "../../../substrate/primitives/version" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-service = { 
path = "../../../substrate/client/service" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } -tokio = { version = "1.32.0", features = ["sync"] } -tokio-util = { version = "0.7.8", features = ["compat"] } +tokio = { features = ["sync"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } -futures = "0.3.28" -futures-timer = "3.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22", features = ["ws-client"] } -tracing = "0.1.37" -async-trait = "0.1.79" -url = "2.4.0" +futures = { workspace = true } +futures-timer = { workspace = true } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["ws-client"], workspace = true } +tracing = { workspace = true, default-features = true } +async-trait = { workspace = true } +url = { workspace = true } serde_json = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } -schnellru = "0.2.1" -smoldot = { version = "0.11.0", default_features = false, features = ["std"] } -smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } -either = "1.8.1" +schnellru = { workspace = true } +smoldot = { default_features = false, features = ["std"], workspace = true } +smoldot-light = { default_features = false, features = ["std"], workspace = true } +either = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = "0.8.5" -pin-project = "1.1.3" +rand = { workspace = true, default-features = true } +pin-project = { workspace = true } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index bb7bfa5dc32268b87bfbe1788aad7b6604961276..692a1fb537a83b5493c207688fd5d40be5321be9 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -24,7 +24,7 @@ use cumulus_primitives_core::{ InboundDownwardMessage, ParaId, PersistedValidationData, }; use cumulus_relay_chain_interface::{ - PHeader, RelayChainError, RelayChainInterface, RelayChainResult, + BlockNumber, CoreState, PHeader, RelayChainError, RelayChainInterface, RelayChainResult, }; use futures::{FutureExt, Stream, StreamExt}; use polkadot_overseer::Handle; @@ -252,4 +252,11 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn version(&self, relay_parent: RelayHash) -> RelayChainResult { self.rpc_client.runtime_version(relay_parent).await } + + async fn availability_cores( + &self, + relay_parent: RelayHash, + ) -> RelayChainResult>> { + self.rpc_client.parachain_host_availability_cores(relay_parent).await + } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs index 
9a49b60281b3c51fa1426903a0e73157a6f04e0e..2347dbb85f78ed1d8017ba076f6d77e97664c021 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs @@ -20,7 +20,7 @@ use futures::{channel::mpsc::Sender, prelude::*, stream::FuturesUnordered}; use jsonrpsee::core::client::{ - Client as JsonRpseeClient, ClientBuilder, ClientT, Error, ReceivedMessage, TransportReceiverT, + Client as JsonRpseeClient, ClientBuilder, ClientT, ReceivedMessage, TransportReceiverT, TransportSenderT, }; use smoldot_light::{ChainId, Client as SmoldotClient, JsonRpcResponses}; @@ -124,7 +124,7 @@ pub struct LightClientRpcWorker { } fn handle_notification( - maybe_header: Option>, + maybe_header: Option>, senders: &mut Vec>, ) -> Result<(), ()> { match maybe_header { diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index e03e20fe5b416102aa99739cb00c0b39edf5b999..8e9e41ca89dc06401c04e36b5cc0db7ffb3e36d7 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -futures = "0.3.28" +futures = { workspace = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-rpc = { path = "../../../substrate/client/rpc" } -sc-service = { path = "../../../substrate/client/service" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-utils = { path = "../../../substrate/client/utils" } -sc-network-transactions = { path = "../../../substrate/client/network/transactions" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } -sp-io = { path = "../../../substrate/primitives/io" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sp-io = { workspace = true, 
default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../cli" } -cumulus-client-collator = { path = "../collator" } -cumulus-client-consensus-common = { path = "../consensus/common" } -cumulus-client-pov-recovery = { path = "../pov-recovery" } -cumulus-client-network = { path = "../network" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } -cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-client-network = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index daff5ef8f482e82b80f341d5208cad51b23b7b1a..1b6ac4cf07dff2341c366a18e103f7e0ec49e2ac 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -10,26 +10,26 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-aura = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] # Cumulus 
-cumulus-pallet-parachain-system = { path = "../parachain-system" } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/aura-ext/src/consensus_hook.rs b/cumulus/pallets/aura-ext/src/consensus_hook.rs index 592029803391179785bb3b5606079d1bee6b553f..560d477b2a850d11fbc2202b900add53351de476 100644 --- a/cumulus/pallets/aura-ext/src/consensus_hook.rs +++ b/cumulus/pallets/aura-ext/src/consensus_hook.rs @@ -65,9 +65,19 @@ where let para_slot_from_relay = Slot::from_timestamp(relay_chain_timestamp.into(), para_slot_duration); - // Perform checks. - assert_eq!(slot, para_slot_from_relay, "slot number mismatch"); - if authored > velocity + 1 { + // Check that we are not too far in the future. Since we expect `V` parachain blocks + // during the relay chain slot, we can allow for `V` parachain slots into the future. + if *slot > *para_slot_from_relay + u64::from(velocity) { + panic!( + "Parachain slot is too far in the future: parachain_slot: {:?}, derived_from_relay_slot: {:?} velocity: {:?}", + slot, + para_slot_from_relay, + velocity + ); + } + + // We need to allow authoring multiple blocks in the same slot. + if slot != para_slot_from_relay && authored > velocity { panic!("authored blocks limit is reached for the slot") } let weight = T::DbWeight::get().reads(1); @@ -113,6 +123,11 @@ impl< return false } + // TODO: This logic needs to be adjusted. + // It checks that we have not authored more than `V + 1` blocks in the slot. + // As a slot however, we take the parachain slot here. Velocity should + // be measured in relation to the relay chain slot. + // https://github.com/paritytech/polkadot-sdk/issues/3967 if last_slot == new_slot { authored_so_far < velocity + 1 } else { diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index 7ca84dff7c513c2406d3c0de7b9c0ac26048f508..4605dd325bee5145974ddc7bca4b5900ad2bc0ed 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -83,7 +83,7 @@ pub mod pallet { SlotInfo::::put((new_slot, authored)); - T::DbWeight::get().reads_writes(2, 1) + T::DbWeight::get().reads_writes(4, 2) } } diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index f30802fa5d82ecb93e8610e7c7bb17a2a83cacb4..206700b7d606c81bef4f33cbfe6e705e6efb49ad 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -17,29 +17,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.6.12" } -rand = { version = "0.8.5", features = ["std_rng"], default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +rand = { features = ["std_rng"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-authorship = { path = 
"../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -pallet-timestamp = { path = "../../../substrate/frame/timestamp" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -pallet-aura = { path = "../../../substrate/frame/aura" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +pallet-aura = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 6521c954eac23f6663e59b593d77e093a626196d..459b1cb5fdf28c118751c3afccc4b882891d1cc3 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -53,23 +53,12 @@ impl system::Config for Test { parameter_types! 
{ pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } pub struct Author4; diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 687cda164fb0bd3d4aefb9d6b51f6735ef3a43c3..052a2547e788b0837cf224cb38095d40c20ed0fd 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -14,26 +14,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 1a6a19f2ab4a2523074bd02aa96053a0f7236140..30a232f01b3e5ca82eab239626e2255a683972ee 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -10,62 +10,62 @@ license = "Apache-2.0" workspace = true [dependencies] -bytes = { version = "1.4.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -environmental = { version = "1.1.4", default-features = false } -impl-trait-for-tuples = "0.2.1" +bytes = { workspace = true } +codec = { features = ["derive"], workspace = true } +environmental = { workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -trie-db = { version = "0.29.0", 
default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +trie-db = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-message-queue = { workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +sp-version = { workspace = true } # Polkadot -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +polkadot-runtime-parachains = { workspace = true } +polkadot-runtime-common = { optional = true, workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } # Cumulus -cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } +cumulus-pallet-parachain-system-proc-macro = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true } [dev-dependencies] -assert_matches = "1.5" -hex-literal = "0.4.1" -lazy_static = "1.4" -trie-standardmap = "0.16.0" -rand 
= "0.8.5" -futures = "0.3.28" +assert_matches = { workspace = true } +hex-literal = { workspace = true, default-features = true } +lazy_static = { workspace = true } +trie-standardmap = { workspace = true } +rand = { workspace = true, default-features = true } +futures = { workspace = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-version = { path = "../../../substrate/primitives/version" } -sp-consensus-slots = { path = "../../../substrate/primitives/consensus/slots" } +sc-client-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } # Cumulus -cumulus-test-client = { path = "../../test/client" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } -cumulus-test-runtime = { path = "../../test/runtime" } +cumulus-test-client = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 0a90c30e0331261026125f429efe70eff07ac069..da6f0fd03efb79b03e4815433084ca8b79f6595d 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -14,9 +14,9 @@ proc-macro = true [dependencies] syn = { workspace = true } -proc-macro2 = "1.0.64" +proc-macro2 = { workspace = true } quote = { workspace = true } -proc-macro-crate = "3.0.0" +proc-macro-crate = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 62c923de59f25d7fc6ba8db15df48e625e16e118..df671566cdc24b8fca9486d0444aad6a1688eedb 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } +codec = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-session = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml 
index 17b0fb2a01662d517a49d1bfd669ed071caf0ed7..ced1b24f1d2bf52f1f6db128affe3bd8595411ba 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -10,21 +10,21 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-sudo = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 178d981702f2e6dc42d05556e20a86f50106b6ee..1f0cef70e3a72920d6c789acb9051ddf2a7ff8e8 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -10,18 +10,18 @@ description = "Pallet for stuff specific to parachains' usage of XCM" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 87602978521fc363539b2aeb81f5b485dbe409ea..c542fa373b5ed352edc4e00ea032b6dd64c97415 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -10,45 +10,45 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = 
false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-message-queue = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } # Optional import for benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -bounded-collections = { version = "0.2.0", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +bounded-collections = { workspace = true } # Bridges -bp-xcm-bridge-hub-router = { path = "../../../bridges/primitives/xcm-bridge-hub-router", default-features = false, optional = true } +bp-xcm-bridge-hub-router = { optional = true, workspace = true } [dev-dependencies] # Substrate -sp-core = { path = "../../../substrate/primitives/core" } -pallet-balances = { path = "../../../substrate/frame/balances" } -frame-support = { path = "../../../substrate/frame/support", features = ["experimental"] } +sp-core = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system" } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 5633f05f13bb81370a23512effefaf6ae1fb23fa..45126a9425d4c0400c7b76f55173339fd4c4a2bb 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -491,7 +491,7 @@ impl Pallet { let channel_info = 
T::ChannelInfo::get_channel_info(recipient).ok_or(MessageSendError::NoChannel)?; // Max message size refers to aggregates, or pages. Not to individual fragments. - let max_message_size = channel_info.max_message_size as usize; + let max_message_size = channel_info.max_message_size.min(T::MaxPageSize::get()) as usize; let format_size = format.encoded_size(); // We check the encoded fragment length plus the format size against the max message size // because the format is concatenated if a new page is needed. @@ -522,7 +522,7 @@ impl Pallet { // We return the size of the last page inside of the option, to not calculate it again. let appended_to_last_page = have_active .then(|| { - >::mutate( + >::try_mutate( recipient, channel_details.last_index - 1, |page| { @@ -532,17 +532,18 @@ impl Pallet { ) != Ok(format) { defensive!("Bad format in outbound queue; dropping message"); - return None + return Err(()) } if page.len() + encoded_fragment.len() > max_message_size { - return None + return Err(()) } for frag in encoded_fragment.iter() { - page.try_push(*frag).ok()?; + page.try_push(*frag)?; } - Some(page.len()) + Ok(page.len()) }, ) + .ok() }) .flatten(); diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index e166a78ee822097ebadb613ed81dd344a6574bf0..7fb96de7a4eaae7e7f78fae66179a26a3a6c5866 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -85,25 +85,14 @@ impl frame_system::Config for Test { parameter_types! { pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } pub type Balance = u64; +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl cumulus_pallet_parachain_system::Config for Test { diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index cdf41e27f0b27aa99a8fc73fd42f98b02ee0e48e..5b02baf2310a368c3b49269e4c594a81d47a3a01 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -28,6 +28,7 @@ use frame_support::{ use mock::{new_test_ext, ParachainSystem, RuntimeOrigin as Origin, Test, XcmpQueue}; use sp_runtime::traits::{BadOrigin, Zero}; use std::iter::{once, repeat}; +use xcm_builder::InspectMessageQueues; #[test] fn empty_concatenated_works() { @@ -854,7 +855,6 @@ fn verify_fee_factor_increase_and_decrease() { #[test] fn get_messages_works() { new_test_ext().execute_with(|| { - use xcm_builder::InspectMessageQueues; let sibling_para_id = ParaId::from(2001); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling_para_id); let destination: Location = (Parent, Parachain(sibling_para_id.into())).into(); @@ -890,3 +890,32 @@ fn get_messages_works() { ); }); } + +/// We try to send a fragment that will not fit into the currently active page. This should +/// therefore not modify the current page but instead create a new one. 
+#[test] +fn page_not_modified_when_fragment_does_not_fit() { + new_test_ext().execute_with(|| { + let sibling = ParaId::from(2001); + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling); + + let destination: Location = (Parent, Parachain(sibling.into())).into(); + let message = Xcm(vec![ClearOrigin; 600]); + + loop { + let old_page_zero = OutboundXcmpMessages::::get(sibling, 0); + assert_ok!(send_xcm::(destination.clone(), message.clone())); + + // If a new page was created by this send_xcm call, then page_zero was not also + // modified: + let num_pages = OutboundXcmpMessages::::iter_prefix(sibling).count(); + if num_pages == 2 { + let new_page_zero = OutboundXcmpMessages::::get(sibling, 0); + assert_eq!(old_page_zero, new_page_zero); + break + } else if num_pages > 2 { + panic!("Too many pages created"); + } + } + }); +} diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index a0d5ddff6ebb1d4808a48c5af16d3a60bb648ad8..a6ba01ffa394dfdba05e65014006f10eb68ea858 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -23,9 +23,8 @@ "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", - "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch16.rotko.net/tcp/33576/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", + "/dns/pch16.rotko.net/tcp/35576/wss/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 00a38b675def73cee48e38e084938e158932bbe2..3352cb25a2898f5442b1e28b340def69fc4168f2 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -8,7 +8,25 @@ "/dns/kusama-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", - "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" + "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", + "/dns/boot.gatotech.network/tcp/33240/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + "/dns/boot.gatotech.network/tcp/35240/wss/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + 
"/dns/people-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + "/dns/people-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + "/dns/kppl16.rotko.net/tcp/33756/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + "/dns/kppl16.rotko.net/tcp/35756/wss/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + "/dns/people-kusama-boot-ng.dwellir.com/tcp/30359/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30645/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30745/wss/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/boot-node.helikon.io/tcp/7510/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/boot-node.helikon.io/tcp/7512/wss/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30377/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30379/wss/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/boot.metaspan.io/tcp/25068/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/boot.metaspan.io/tcp/25069/wss/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/30342/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/443/wss/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 2b943b6dca55989a891895b4abb3195970978b06..6eac9a09672675c3cb46d1eb49392bb0651eb7dd 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,42 +13,42 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-asset-tx-payment = { path = "../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } 
-sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +polkadot-primitives = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } +pallet-collator-selection = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +parachain-info = { workspace = true } [dev-dependencies] -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } +pallet-authorship = { workspace = true } +sp-io = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index ed9c5c483fa74282306d20c20a6c6313bd940f5a..16cda1a4ed83850f4e67765a5e7092dad4a24305 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -202,7 +202,7 @@ mod tests { use frame_system::{limits, EnsureRoot}; use pallet_collator_selection::IdentityCollator; use polkadot_primitives::AccountId; - use sp_core::{ConstU64, H256}; + use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, @@ -224,7 +224,6 @@ mod tests { parameter_types! 
{ pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MaxReserves: u32 = 50; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -253,20 +252,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } pub struct OneAuthor; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index 8100e681348836fb28c9236b9ba20d27f117d71b..7bd91ae6774c61f93b12a91af298e64777980ac1 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } -rococo-emulated-chain = { path = "../../../relays/rococo" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } +rococo-emulated-chain = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index e5378b35f5e484e10db94c66bc5244099b682604..3a87322664d91edb829b44603122c300602f2978 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -21,7 +21,7 @@ use sp_core::{sr25519, storage::Storage}; use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, PenpalSiblingSovereignAccount, 
PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, - SAFE_XCM_VERSION, + SAFE_XCM_VERSION, USDT_ID, }; use parachains_common::{AccountId, Balance}; @@ -68,7 +68,10 @@ pub fn genesis() -> Storage { ..Default::default() }, assets: asset_hub_rococo_runtime::AssetsConfig { - assets: vec![(RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), true, ED)], + assets: vec![ + (RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), true, ED), + (USDT_ID, AssetHubRococoAssetOwner::get(), true, ED), + ], ..Default::default() }, foreign_assets: asset_hub_rococo_runtime::ForeignAssetsConfig { diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 202d02b250bb2e90261a01c13c6aab59c674b511..80d2376c6811d7e8f407330dd3c2b02a5260159b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub use asset_hub_rococo_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index e0abaa66c5cabba445b91c19436f9a4ce3642386..86d4ce3e7ac829e959aa3169adbd22a257cc26a5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } -westend-emulated-chain = { path = "../../../relays/westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +asset-hub-westend-runtime = { workspace = true } +westend-emulated-chain = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 6043a6aeda48f1e1ec010ac42e98a50feaae3a30..608690218d2f4c439511e508e261fe3e74d37465 100644 --- 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub use asset_hub_westend_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 789f10a35f268c62e0cc9fa153c99e7e5282ee8d..f3c0799ad0f6acf14b3e99d4e27e46921c4e9cb9 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index 8c18d112bc12fb4883d313106fa66841dcad8d2e..d8b8edaf2409ba1da0aef01874792d6ef21e6a8a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -15,6 +15,11 @@ pub mod genesis; +pub use bridge_hub_rococo_runtime::{ + xcm_config::XcmConfig as BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, + RuntimeOrigin as BridgeHubRococoRuntimeOrigin, +}; + // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index d82971cf55aeddf20032be952b8a980014434f6b..ebcec9641e7d9f122f5b6737769bc140d9c877a5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = 
"../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } -bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index b0dddc9dbf9a5b71a776e3ae48b97bbb9f29adf2..f701b3096994a3da7bd59726bedb0802e1b14276 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -15,6 +15,8 @@ pub mod genesis; +pub use bridge_hub_westend_runtime::xcm_config::XcmConfig as BridgeHubWestendXcmConfig; + // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 4c2a7d3c274dce6eade9c9d42be00301bd6dc462..87dfd73ab05bab063bedeebae9feb03bcc4af46f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +collectives-westend-runtime = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs index a32e865dd9ce8497755a261c6922273aea8b49f6..f90d82231a3bb1ea64cfb6acad2759bcd486f1f9 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub use collectives_westend_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index f7fe93d27775a28cb560d8791a3b0d8ed49c9d68..1549d6a2ab6ba1de05d9233ff7bc29951501a43d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -people-rococo-runtime = { path = "../../../../../../runtimes/people/people-rococo" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +people-rococo-runtime = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs index fa818bf81bf60ac6358c1c983faf8657cb139dd3..c8da97cc3e8bf6371ddcb2d6a122f06fb80cb518 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use people_rococo_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 57a767e0c2a3eb7d23df7f8d95fd78128c996f35..9c5ac0bca9de7ae2f201aba958b8220b9a24a013 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -people-westend-runtime = { path = "../../../../../../runtimes/people/people-westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +people-westend-runtime = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs index 775b89ac208b022f898c7c54423e8a21b7214ae3..904ce34d8c08a63500a1b3c815b8d1c469a89861 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use people_westend_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 2ac508273c6158ddae08615d8574102f98e3e788..9e6b14b585984d9b384f835f1b51f8a3e2e658a8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -13,14 +13,14 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +penpal-runtime = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index c268b014bfa34e1b8c0a450ae2e446bb6f636c9d..91793d33f304fbbf4d1d2aeea0bf315730a5794f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -13,11 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+pub use penpal_runtime::{self, xcm_config::RelayNetworkId as PenpalRelayNetworkId}; + mod genesis; pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; -pub use penpal_runtime::xcm_config::{ - CustomizableAssetFromSystemAssetHub, RelayNetworkId as PenpalRelayNetworkId, -}; // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 113036b4c00ea697507166a1f1da8d44458bacdd..9376687947e6c3683d5c1c58fdaf77452fd01449 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -13,17 +13,17 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +sp-core = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } +polkadot-primitives = { workspace = true } +rococo-runtime-constants = { workspace = true } +rococo-runtime = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index 7a3a936ec972f0a8c99e3b472c7cce9e9914e29c..bd637a5f7965bca47b171283bb37473b009bdbc1 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use rococo_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index b952477c47a7c33277d9cd6bf09ea7a2ba1dd799..de285d9885a2f78fdea1a27ea8b8d9dc3840d300 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -13,21 +13,21 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } -pallet-staking = { path = "../../../../../../../substrate/frame/staking", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true } +pallet-staking = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +polkadot-primitives = { workspace = true } +westend-runtime-constants = { workspace = true } +westend-runtime = { workspace = true } +xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index 83af58f61732d7c0e545c365ab2d049125f3777e..ce9fafcd5bda8bd815d197b7e4a1ee70f6eb426c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use westend_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index d9ec813232309998e8e50557138773d14c65c04b..7152f1dbc272bd8eef49e2343b2c5cbbeb9f1ba4 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -10,37 +10,37 @@ description = "Common resources for integration testing with xcm-emulator" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -paste = "1.0.14" +codec = { workspace = true } +paste = { workspace = true, default-features = true } # Substrate -sp-consensus-beefy = { path = "../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../substrate/client/consensus/grandpa" } -sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime" } -frame-support = { path = "../../../../../substrate/frame/support" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe" } -pallet-assets = { path = "../../../../../substrate/frame/assets" } -pallet-balances = { path = "../../../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue" } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm" } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } # Cumulus -parachains-common = { path = "../../../common" } -cumulus-primitives-core = { path = "../../../../primitives/core" } -xcm-emulator = { path = "../../../../xcm/xcm-emulator" } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } -asset-test-utils = { path = "../../../runtimes/assets/test-utils" } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +xcm-emulator = { workspace = true, default-features = true } 
+cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } # Bridges -bp-messages = { path = "../../../../../bridges/primitives/messages" } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common" } +bp-messages = { workspace = true, default-features = true } +pallet-bridge-messages = { workspace = true, default-features = true } +bridge-runtime-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 4a9d3b3a5aaf5bb885bf4fa92759f67f9165a85a..7077fbbb0a9aa2d1b572720da448ccae2fe95f03 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -51,11 +51,14 @@ pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; type AccountPublic = ::Signer; -// This asset is added to AH as Asset and reserved transfer between Parachain and AH +// (trust-backed) Asset registered on AH and reserve-transferred between Parachain and AH pub const RESERVABLE_ASSET_ID: u32 = 1; -// This asset is added to AH as ForeignAsset and teleported between Penpal and AH +// ForeignAsset registered on AH and teleported between Penpal and AH pub const TELEPORTABLE_ASSET_ID: u32 = 2; +// USDT registered on AH as (trust-backed) Asset and reserve-transferred between Parachain and AH +pub const USDT_ID: u32 = 1984; + pub const PENPAL_ID: u32 = 2000; pub const ASSETS_PALLET_ID: u8 = 50; diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml index eb0a8a850d06928d67147dc14a11f566d1ad7c9d..298be7362ec3a4e63bb4139b1050d1b0223c9382 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -12,9 +12,9 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -rococo-emulated-chain = { path = "../../chains/relays/rococo" } -asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -people-rococo-emulated-chain = { path = "../../chains/parachains/people/people-rococo" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } +asset-hub-rococo-emulated-chain = { workspace = true } +bridge-hub-rococo-emulated-chain = { workspace = true } +people-rococo-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 744cbe4f8c1e31ed5a9b122a5b5939509234236a..cd0cb272b7f5ea9bea09835b7cf4612ad9c377be 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml +++ 
b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -rococo-emulated-chain = { path = "../../chains/relays/rococo" } -westend-emulated-chain = { path = "../../chains/relays/westend" } -asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } -bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } +westend-emulated-chain = { workspace = true, default-features = true } +asset-hub-rococo-emulated-chain = { workspace = true } +asset-hub-westend-emulated-chain = { workspace = true } +bridge-hub-rococo-emulated-chain = { workspace = true } +bridge-hub-westend-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml index 64bc91f442d1b27166fbc4f0a2dc22798c97ff39..37c14aa30352922b6f9f44ab89c6501484c3212d 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -12,10 +12,10 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -westend-emulated-chain = { path = "../../chains/relays/westend", default-features = false } -asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } -bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } -collectives-westend-emulated-chain = { path = "../../chains/parachains/collectives/collectives-westend" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } -people-westend-emulated-chain = { path = "../../chains/parachains/people/people-westend" } +emulated-integration-tests-common = { workspace = true } +westend-emulated-chain = { workspace = true } +asset-hub-westend-emulated-chain = { workspace = true } +bridge-hub-westend-emulated-chain = { workspace = true } +collectives-westend-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } +people-westend-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 9abecbecc48a725448cfb17508351d5e76f848de..b4579da94cbf6fc6fbb5f5d0e47fdfabdb5134da 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,32 +11,29 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace 
= true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } +polkadot-runtime-common = { workspace = true, default-features = true } +rococo-runtime-constants = { workspace = true, default-features = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } -parachains-common = { path = "../../../../../common" } -asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } -penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 2bd388bee400ed2e61869e126a1828b93422f2c0..f00945926963cb562c627eba90171f7ad58c70a5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -46,14 +46,36 @@ mod imports { pub use parachains_common::Balance; pub use rococo_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ 
+ asset_hub_rococo_runtime::{ + xcm_config::{ + self as ahr_xcm_config, TokenLocation as RelayLocation, + XcmConfig as AssetHubRococoXcmConfig, + }, + AssetConversionOrigin as AssetHubRococoAssetConversionOrigin, + }, genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, AssetHubRococoParaPallet as AssetHubRococoPallet, }, penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, + LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, ED as PENPAL_ED, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + rococo_emulated_chain::{ + genesis::ED as ROCOCO_ED, + rococo_runtime::{ + governance as rococo_governance, + xcm_config::{ + UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, + }, + OriginCaller as RococoOriginCaller, + }, + RococoRelayPallet as RococoPallet, + }, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, PenpalAPara as PenpalA, @@ -62,18 +84,6 @@ mod imports { RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, }; - // Runtimes - pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, - }; - pub use penpal_runtime::xcm_config::{ - LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, - LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, - }; - pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index edaaa998a9ca11f97b9d2c85e8b2b88d1c570fbc..7ff6d6c193c9b414632b13e267220a59c770449a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -170,7 +170,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Westend)]).encode(), )], )); @@ -300,7 +300,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Westend)]).encode(), )], )); @@ -454,7 +454,7 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, 
[GlobalConsensus(Westend)]).encode(), )], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index ec48e400ff545686fe728025eacc7ea5cd783d6f..16e0512da960559d192fb427ad55485fd0922172 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -17,10 +17,7 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new( - v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) - .expect("conversion works"), - ); + let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); let asset_one = Box::new(v3::Location::new( 0, [ @@ -230,12 +227,12 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); - let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocation::get(); + let asset_native = RelayLocation::get(); + let mut asset_one = ahr_xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubRococo::execute_with(|| { - let pool_owner_account_id = asset_hub_rococo_runtime::AssetConversionOrigin::get(); + let pool_owner_account_id = AssetHubRococoAssetConversionOrigin::get(); assert_ok!(::PoolAssets::create( ::RuntimeOrigin::signed(pool_owner_account_id.clone()), @@ -255,8 +252,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(v3::Location::try_from(asset_native).expect("conversion works")), - Box::new(v3::Location::try_from(asset_one).expect("conversion works")), + Box::new(v3::Location::try_from(asset_native).unwrap()), + Box::new(v3::Location::try_from(asset_one).unwrap()), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -265,9 +262,7 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = - v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) - .expect("conversion works"); + let asset_native = v3::Location::try_from(RelayLocation::get()).unwrap(); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index 01bf40ae8fdf2cf87092c83ef604ef25427e2939..f8190e11c51c8b202804dcee9cf689d93ef33678 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -20,12 +20,11 @@ use frame_support::{ sp_runtime::traits::Dispatchable, traits::{ fungible::Inspect, - fungibles::{Create, Inspect as FungiblesInspect, Mutate}, + fungibles::{Inspect as FungiblesInspect, Mutate}, }, }; use parachains_common::AccountId; use polkadot_runtime_common::impls::VersionedLocatableAsset; -use rococo_runtime::OriginCaller; use 
rococo_runtime_constants::currency::GRAND; use xcm_executor::traits::ConvertLocation; @@ -67,7 +66,7 @@ fn spend_roc_on_asset_hub() { let treasury_location: Location = (Parent, PalletInstance(18)).into(); let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { - as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + as_origin: bx!(RococoOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), beneficiary: bx!(VersionedLocation::V4(treasury_location)), @@ -99,7 +98,7 @@ fn spend_roc_on_asset_hub() { // Fund Alice account from Rococo Treasury account on Asset Hub. let treasury_origin: RuntimeOrigin = - rococo_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + rococo_governance::pallet_custom_origins::Origin::Treasurer.into(); let alice_location: Location = [Junction::AccountId32 { network: None, id: Rococo::account_id_of(ALICE).into() }] @@ -163,15 +162,12 @@ fn spend_roc_on_asset_hub() { #[test] fn create_and_claim_treasury_spend_in_usdt() { const ASSET_ID: u32 = 1984; - const SPEND_AMOUNT: u128 = 1_000_000; + const SPEND_AMOUNT: u128 = 10_000_000; // treasury location from a sibling parachain. let treasury_location: Location = Location::new(1, PalletInstance(18)); // treasury account on a sibling parachain. let treasury_account = - asset_hub_rococo_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahr_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into())); let root = ::RuntimeOrigin::root(); @@ -190,13 +186,7 @@ fn create_and_claim_treasury_spend_in_usdt() { AssetHubRococo::execute_with(|| { type Assets = ::Assets; - // create an asset class and mint some assets to the treasury account. - assert_ok!(>::create( - ASSET_ID, - treasury_account.clone(), - true, - SPEND_AMOUNT / 2 - )); + // USDT created at genesis, mint some assets to the treasury account. assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); // beneficiary has zero balance. 
assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index e0f29cd801c346a064a4773efa5754b0e2f399f4..6b50b6f473ed087e55ba994fd7cae0f97c48dace 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,38 +11,35 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-keyring = { path = "../../../../../../../substrate/primitives/keyring", default-features = false } -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../../../substrate/frame/metadata-hash-extension" } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-transaction-payment = { path = "../../../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-asset-tx-payment = { path = "../../../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +sp-runtime = { workspace = true } +sp-keyring = { workspace = true } +sp-core = { workspace = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-treasury = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-asset-tx-payment = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +polkadot-runtime-common = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = 
true } +pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +parachains-common = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +emulated-integration-tests-common = { workspace = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 1c4a0ef4c8d2af7f773fbb6916391012ec9fdfc2..db8ada3f4ea281a61e9daaf3fa2cc2ba19747090 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -46,15 +46,33 @@ mod imports { pub use parachains_common::{AccountId, Balance}; pub use westend_system_emulated_network::{ asset_hub_westend_emulated_chain::{ + asset_hub_westend_runtime::{ + xcm_config::{ + self as ahw_xcm_config, WestendLocation as RelayLocation, + XcmConfig as AssetHubWestendXcmConfig, + }, + AssetConversionOrigin as AssetHubWestendAssetConversionOrigin, + }, genesis::{AssetHubWestendAssetOwner, ED as ASSET_HUB_WESTEND_ED}, AssetHubWestendParaPallet as AssetHubWestendPallet, }, collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, + LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, - westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, + westend_emulated_chain::{ + genesis::ED as WESTEND_ED, + westend_runtime::xcm_config::{ + UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, + }, + WestendRelayPallet as WestendPallet, + }, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, AssetHubWestendParaSender as AssetHubWestendSender, @@ -66,18 +84,6 @@ mod imports { WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; - // Runtimes - pub use asset_hub_westend_runtime::xcm_config::{ - WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, - }; - pub use penpal_runtime::xcm_config::{ - LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, - LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as 
WestendUniversalLocation, XcmConfig as WestendXcmConfig, - }; - pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs index 2d02e90f47fb834326a7268394671f5ef03f84e7..15f4fe33bddc1b65f079832facc4ce495823ad84 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -28,10 +28,7 @@ fn create_and_claim_treasury_spend() { Location::new(1, [Parachain(CollectivesWestend::para_id().into()), PalletInstance(65)]); // treasury account on a sibling parachain. let treasury_account = - asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(1, [Parachain(AssetHubWestend::para_id().into())]); let root = ::RuntimeOrigin::root(); // asset kind to be spent from the treasury. diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index d39c72c7c5f0d21815ca0091e9fd888b5ab54924..49dfe8d58394c9ae7b69fba75e8147e7fa94e2db 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -170,7 +170,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); @@ -300,7 +300,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); @@ -455,7 +455,7 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index f6b6580988658f5fadead0250a6bcc886c9f125d..cf429378cf6d8eb79eb0c53a9da1c882fc86537a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -17,10 +17,8 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new( - 
v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) - .expect("conversion works"), - ); + let asset_native = + Box::new(v3::Location::try_from(RelayLocation::get()).expect("conversion works")); let asset_one = Box::new(v3::Location { parents: 0, interior: [ @@ -229,12 +227,12 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); - let mut asset_one = asset_hub_westend_runtime::xcm_config::PoolAssetsPalletLocation::get(); + let asset_native = RelayLocation::get(); + let mut asset_one = ahw_xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubWestend::execute_with(|| { - let pool_owner_account_id = asset_hub_westend_runtime::AssetConversionOrigin::get(); + let pool_owner_account_id = AssetHubWestendAssetConversionOrigin::get(); assert_ok!(::PoolAssets::create( ::RuntimeOrigin::signed(pool_owner_account_id.clone()), @@ -264,9 +262,7 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = - v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) - .expect("conversion works"); + let asset_native = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index 6d8c0f5e5de6ae70559f5d60545959db5e74f735..8cbce3e0d223277e240e159e99f76696b7b339be 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -27,10 +27,7 @@ fn create_and_claim_treasury_spend() { let treasury_location: Location = Location::new(1, PalletInstance(37)); // treasury account on a sibling parachain. let treasury_account = - asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(0, Parachain(AssetHubWestend::para_id().into())); let root = ::RuntimeOrigin::root(); // asset kind to be spent from the treasury. 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs index dc89ef1f7a44e6afc218de787bc47d452ae50fa8..c01aa7825336348f5077c89a73c61e7b128a6200 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs @@ -18,7 +18,7 @@ use crate::imports::*; use frame_system::RawOrigin; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index bed5af92f6e55b37f7f12518dbe1ed1b290dd5aa..a5787885329d75a05fe50ce18690fd6d4076db51 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,40 +11,38 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +pallet-bridge-messages = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } -cumulus-pallet-xcmp-queue = { path = 
"../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } -asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo", default-features = false } +cumulus-pallet-xcmp-queue = { workspace = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-system-emulated-network = { workspace = true } +rococo-westend-system-emulated-network = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } # Snowbridge -snowbridge-core = { path = "../../../../../../../bridges/snowbridge/primitives/core", default-features = false } -snowbridge-router-primitives = { path = "../../../../../../../bridges/snowbridge/primitives/router", default-features = false } -snowbridge-pallet-system = { path = "../../../../../../../bridges/snowbridge/pallets/system", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../../../../../../bridges/snowbridge/pallets/outbound-queue", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "../../../../../../../bridges/snowbridge/pallets/inbound-queue/fixtures" } +snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 0415af580ef8add90c92620e93052e356abe2de9..04466a611c71318280d135324ba3de418f9348d9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -35,19 +35,30 @@ mod imports { xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ - genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + asset_hub_rococo_runtime::xcm_config as ahr_xcm_config, + genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, + AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_rococo_emulated_chain::{ - genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, + genesis::ED as BRIDGE_HUB_ROCOCO_ED, + BridgeHubRococoParaPallet as BridgeHubRococoPallet, BridgeHubRococoRuntimeOrigin, + BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, + }, + penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + UniversalLocation 
as PenpalUniversalLocation, + }, + PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, }, - penpal_emulated_chain::{PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner}, rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 87fb70e4de23857bf929ed1a663fd2dcc3120e93..6053936487b26e97fe8575cd5b666a03cff7ccf6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -15,168 +15,140 @@ use crate::tests::*; -fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: Location, amount: u128) { - let destination = asset_hub_westend_location(); - +fn send_assets_over_bridge(send_fn: F) { // fund the AHR's SA on BHR for paying bridge transport fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions - AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); + let local_asset_hub = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + PenpalA::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubRococo::force_xcm_version(asset_hub_westend_location(), XCM_VERSION); BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); // send message over bridge - assert_ok!(send_asset_from_asset_hub_rococo(destination, (id, amount))); + send_fn(); + + // process and verify intermediary hops assert_bridge_hub_rococo_message_accepted(true); assert_bridge_hub_westend_message_received(); } -fn send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( - id: Location, - transfer_amount: u128, -) { - let destination = asset_hub_westend_location(); - let local_asset_hub: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); - let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( - AssetHubRococo::sibling_location_of(PenpalA::para_id()), - ); - let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, - AssetHubWestend::para_id(), - ); - - // fund the AHR's SA on BHR for paying bridge transport fees - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); +fn set_up_rocs_for_penpal_rococo_through_ahr_to_ahw( + sender: &AccountId, + amount: u128, +) -> (Location, v3::Location) { + let roc_at_rococo_parachains = roc_at_ah_rococo(); + let roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); + create_foreign_on_ah_westend(roc_at_asset_hub_westend, true); - // set XCM versions - PenpalA::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); - AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); - BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); - - // send message over bridge - assert_ok!(PenpalA::execute_with(|| { - let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get()); - let beneficiary: Location = - AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); - 
let assets: Assets = (id.clone(), transfer_amount).into(); - let fees_id: AssetId = id.into(); - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary, - }]); + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount * 2)]); + // fund Penpal's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + roc_at_rococo_parachains.clone(), + sender.clone(), + amount * 2, + ); + (roc_at_rococo_parachains, roc_at_asset_hub_westend) +} - ::PolkadotXcm::transfer_assets_using_type_and_then( - signed_origin, - bx!(destination.into()), - bx!(assets.clone().into()), - bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), - bx!(fees_id.into()), - bx!(TransferType::RemoteReserve(local_asset_hub.into())), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - AssetHubRococo, - vec![ - // Amount to reserve transfer is withdrawn from Penpal's sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahr.clone().into(), - amount: *amount == transfer_amount, - }, - // Amount deposited in AHW's sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == sov_ahw_on_ahr.clone().into(), - }, - RuntimeEvent::XcmpQueue( - cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } - ) => {}, - ] +fn send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + destination: Location, + assets: (Assets, TransferType), + fees: (AssetId, TransferType), + custom_xcm_on_dest: Xcm<()>, +) { + send_assets_over_bridge(|| { + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); + let sov_ahw_on_ahr = + AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + // send message over bridge + assert_ok!(PenpalA::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get()); + ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin, + bx!(destination.into()), + bx!(assets.0.into()), + bx!(assets.1), + bx!(fees.0.into()), + bx!(fees.1), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify intermediary AH Rococo hop + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, .. } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + }, + // Amount deposited in AHW's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahw_on_ahr.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} + ) => {}, + ] + ); + }); }); - assert_bridge_hub_rococo_message_accepted(true); - assert_bridge_hub_westend_message_received(); } #[test] -fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { - let roc_at_asset_hub_rococo: v3::Location = v3::Parent.into(); - let roc_at_asset_hub_westend = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend, - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); +/// Test transfer of ROC, USDT and wETH from AssetHub Rococo to AssetHub Westend. +/// +/// This mix of assets should cover the whole range: +/// - native assets: ROC, +/// - trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over bridge), +/// - foreign asset / bridged asset (other bridge / Snowfork): wETH (bridged from Ethereum to Rococo +/// over Snowbridge, then bridged over to Westend through this bridge). +fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { + let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; + let sender = AssetHubRococoSender::get(); + let receiver = AssetHubWestendReceiver::get(); + let roc_at_asset_hub_rococo: v3::Location = roc_at_ah_rococo().try_into().unwrap(); + let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); + + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend, true); + set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend); + + //////////////////////////////////////////////////////////// + // Let's first send over just some ROCs as a simple example + //////////////////////////////////////////////////////////// let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( Westend, AssetHubWestend::para_id(), ); - - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // setup a pool to pay xcm fees with `roc_at_asset_hub_westend` tokens - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - roc_at_asset_hub_westend.into(), - AssetHubWestendSender::get().into(), - 3_000_000_000_000, - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(roc_at_asset_hub_westend), - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. 
}) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(roc_at_asset_hub_westend), - 1_000_000_000_000, - 2_000_000_000_000, - 1, - 1, - AssetHubWestendSender::get().into() - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, - ] - ); - }); - let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; - let sender_rocs_before = - ::account_data_of(AssetHubRococoSender::get()).free; - let receiver_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + let sender_rocs_before = ::account_data_of(sender.clone()).free; + let receiver_rocs_before = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &receiver); + + // send ROCs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_westend_location(); + let assets: Assets = (Location::try_from(roc_at_asset_hub_rococo).unwrap(), amount).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); }); - let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; - send_asset_from_asset_hub_rococo_to_asset_hub_westend( - roc_at_asset_hub_rococo.try_into().unwrap(), - amount, - ); + // verify expected events on final destination AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -185,7 +157,7 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { // issue ROCs on AHW RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { asset_id: *asset_id == roc_at_asset_hub_rococo, - owner: *owner == AssetHubWestendReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -195,36 +167,100 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { ); }); - let sender_rocs_after = - ::account_data_of(AssetHubRococoSender::get()).free; - let receiver_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) - }); + let sender_rocs_after = ::account_data_of(sender.clone()).free; + let receiver_rocs_after = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &receiver); let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; - // Sender's balance is reduced + // Sender's ROC balance is reduced assert!(sender_rocs_before > sender_rocs_after); - // Receiver's balance is increased + // Receiver's ROC balance is increased assert!(receiver_rocs_after > receiver_rocs_before); - // Reserve balance is increased by sent amount + // Reserve ROC balance is increased by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); + + ///////////////////////////////////////////////////////////// + // Now let's send over USDTs + wETH (and pay fees with USDT) + ///////////////////////////////////////////////////////////// + + let usdt_at_asset_hub_rococo = usdt_at_ah_rococo(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + // wETH has same relative location on both Rococo and Westend AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); + + // mint USDT in sender's account (USDT already created in 
genesis) + AssetHubRococo::mint_asset( + ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), + USDT_ID, + sender.clone(), + amount * 2, + ); + // create wETH at src and dest and prefund sender's account + create_foreign_on_ah_rococo(bridged_weth_at_ah, true, vec![(sender.clone(), amount * 2)]); + create_foreign_on_ah_westend(bridged_weth_at_ah, true); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true); + set_up_pool_with_wnd_on_ah_westend(bridged_usdt_at_asset_hub_westend); + + let receiver_usdts_before = + foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); + let receiver_weth_before = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_at_asset_hub_rococo.clone(), amount).into(), + (Location::try_from(bridged_weth_at_ah).unwrap(), amount).into(), + ] + .into(); + // use USDT for fees + let fee: AssetId = usdt_at_asset_hub_rococo.into(); + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + }]); + assert_ok!(AssetHubRococo::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_westend_location().into()), + bx!(assets.into()), + bx!(TransferType::LocalReserve), + bx!(fee.into()), + bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); + AssetHubWestend::execute_with(|| { + AssetHubWestend::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = + foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); + let receiver_weth_after = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount); + // Receiver's wETH balance is increased by sent amount + assert_eq!(receiver_weth_after, receiver_weth_before + amount); } #[test] -fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { +/// Send bridged WNDs "back" from AssetHub Rococo to AssetHub Westend. 
+fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let prefund_amount = 10_000_000_000_000u128; - let wnd_at_asset_hub_rococo = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo, - owner, - true, - ASSET_MIN_BALANCE, - vec![(AssetHubRococoSender::get(), prefund_amount)], - ); + let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; + let sender = AssetHubRococoSender::get(); + let receiver = AssetHubWestendReceiver::get(); + let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); + let wnd_at_asset_hub_rococo_v3 = wnd_at_asset_hub_rococo.clone().try_into().unwrap(); + let prefund_accounts = vec![(sender.clone(), prefund_amount)]; + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo_v3, true, prefund_accounts); // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( @@ -236,19 +272,19 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); - let sender_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) - }); + + let sender_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); assert_eq!(sender_wnds_before, prefund_amount); - let receiver_wnds_before = - ::account_data_of(AssetHubWestendReceiver::get()).free; + let receiver_wnds_before = ::account_data_of(receiver.clone()).free; + + // send back WNDs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_westend_location(); + let assets: Assets = (wnd_at_asset_hub_rococo, amount_to_send).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); + }); - let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; - send_asset_from_asset_hub_rococo_to_asset_hub_westend( - Location::try_from(wnd_at_asset_hub_rococo).unwrap(), - amount_to_send, - ); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -263,7 +299,7 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { }, // WNDs deposited to beneficiary RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { - who: *who == AssetHubWestendReceiver::get(), + who: who == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -273,12 +309,8 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { ); }); - let sender_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) - }); - let receiver_wnds_after = - ::account_data_of(AssetHubWestendReceiver::get()).free; + let sender_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); + let receiver_wnds_after = ::account_data_of(receiver).free; let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -292,55 +324,47 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { #[test] fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { - let roc_at_rococo_parachains: Location = Parent.into(); - let roc_at_asset_hub_westend = Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend.clone().try_into().unwrap(), - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; + let sender = PenpalASender::get(); + let receiver = AssetHubWestendReceiver::get(); + let local_asset_hub = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let (roc_at_rococo_parachains, roc_at_asset_hub_westend) = + set_up_rocs_for_penpal_rococo_through_ahr_to_ahw(&sender, amount); + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( Westend, AssetHubWestend::para_id(), ); - - let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; - let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); - // fund Penpal's sovereign account on AssetHub - AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount * 2)]); - // fund Penpal's sender account - PenpalA::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - roc_at_rococo_parachains.clone(), - PenpalASender::get(), - amount * 2, - ); - let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; let sender_rocs_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance( - roc_at_rococo_parachains.clone(), - &PenpalASender::get(), - ) + >::balance(roc_at_rococo_parachains.clone(), &sender) }); - let receiver_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - roc_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) - }); - send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( - roc_at_rococo_parachains.clone(), - amount, - ); + let receiver_rocs_before = foreign_balance_on_ah_westend(roc_at_asset_hub_westend, &receiver); + + // Send ROCs over bridge + { + let destination = asset_hub_westend_location(); + let assets: Assets = (roc_at_rococo_parachains.clone(), amount).into(); + let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into()); + let fees_id: AssetId = roc_at_rococo_parachains.clone().into(); + let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into()); + let beneficiary: Location = + AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + let 
custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); + send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + destination, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + custom_xcm_on_dest, + ); + } + // process AHW incoming message and check events AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -349,7 +373,7 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() // issue ROCs on AHW RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { asset_id: *asset_id == roc_at_rococo_parachains.clone().try_into().unwrap(), - owner: *owner == AssetHubWestendReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -361,15 +385,9 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() let sender_rocs_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(roc_at_rococo_parachains, &PenpalASender::get()) - }); - let receiver_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - roc_at_asset_hub_westend.try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) + >::balance(roc_at_rococo_parachains, &sender) }); + let receiver_rocs_after = foreign_balance_on_ah_westend(roc_at_asset_hub_westend, &receiver); let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; @@ -381,3 +399,121 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() assert!(rocs_in_reserve_on_ahr_after > rocs_in_reserve_on_ahr_before); assert!(rocs_in_reserve_on_ahr_after <= rocs_in_reserve_on_ahr_before + amount); } + +#[test] +fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { + let wnd_at_rococo_parachains = bridged_wnd_at_ah_rococo(); + let wnd_at_rococo_parachains_v3 = wnd_at_rococo_parachains.clone().try_into().unwrap(); + let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; + let sender = PenpalASender::get(); + let receiver = AssetHubWestendReceiver::get(); + + // set up ROCs for transfer + let (roc_at_rococo_parachains, _) = + set_up_rocs_for_penpal_rococo_through_ahr_to_ahw(&sender, amount); + + // set up WNDs for transfer + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; + create_foreign_on_ah_rococo(wnd_at_rococo_parachains_v3, true, prefund_accounts); + let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE); + PenpalA::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + asset_owner.clone(), + true, + ASSET_MIN_BALANCE, + vec![(sender.clone(), amount * 2)], + ); + + // fund the AHR's SA on AHW with the WND tokens held in reserve + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), amount * 2)]); + + // balances before + let sender_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone().into(), &sender) + }); + let receiver_wnds_before = ::account_data_of(receiver.clone()).free; + + // send WNDs over the bridge, ROCs only used to pay fees on local 
AH, pay with WND on remote AH + { + let final_destination = asset_hub_westend_location(); + let intermediary_hop = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let context = PenpalA::execute_with(|| PenpalUniversalLocation::get()); + + // what happens at final destination + let beneficiary = AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + // use WND as fees on the final destination (AHW) + let remote_fees: Asset = (wnd_at_rococo_parachains.clone(), amount).into(); + let remote_fees = remote_fees.reanchored(&final_destination, &context).unwrap(); + // buy execution using WNDs, then deposit all remaining WNDs + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: WeightLimit::Unlimited }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary }, + ]); + + // what happens at intermediary hop + // reanchor final dest (Asset Hub Westend) to the view of hop (Asset Hub Rococo) + let mut final_destination = final_destination.clone(); + final_destination.reanchor(&intermediary_hop, &context).unwrap(); + // reanchor WNDs to the view of hop (Asset Hub Rococo) + let asset: Asset = (wnd_at_rococo_parachains.clone(), amount).into(); + let asset = asset.reanchored(&intermediary_hop, &context).unwrap(); + // on Asset Hub Rococo, forward a request to withdraw WNDs from reserve on Asset Hub Westend + let xcm_on_hop = Xcm::<()>(vec![InitiateReserveWithdraw { + assets: Definite(asset.into()), // WNDs + reserve: final_destination, // AHW + xcm: xcm_on_final_dest, // XCM to execute on AHW + }]); + // assets to send from Penpal and how they reach the intermediary hop + let assets: Assets = vec![ + (wnd_at_rococo_parachains.clone(), amount).into(), + (roc_at_rococo_parachains.clone(), amount).into(), + ] + .into(); + let asset_transfer_type = TransferType::DestinationReserve; + let fees_id: AssetId = roc_at_rococo_parachains.into(); + let fees_transfer_type = TransferType::DestinationReserve; + + // initiate the transfer + send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + intermediary_hop, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + xcm_on_hop, + ); + } + + // process AHW incoming message and check events + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::Balances(pallet_balances::Event::Issued { .. }) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.into(), &sender) + }); + let receiver_wnds_after = ::account_data_of(receiver).free; + + // Sender's balance is reduced by sent "amount" + assert_eq!(sender_wnds_after, sender_wnds_before - amount); + // Receiver's balance is increased by no more than "amount" + assert!(receiver_wnds_after > receiver_wnds_before); + assert!(receiver_wnds_after <= receiver_wnds_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 88dad06434b0d4a28295708c303907e02e70927b..58c52e1328c81f60c9a60d3fd9677a1bf8f66e95 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -21,39 +21,141 @@ mod snowbridge; mod teleport; pub(crate) fn asset_hub_westend_location() -> Location { + Location::new(2, [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())]) +} +pub(crate) fn bridge_hub_westend_location() -> Location { + Location::new(2, [GlobalConsensus(Westend), Parachain(BridgeHubWestend::para_id().into())]) +} + +// ROC and wROC +pub(crate) fn roc_at_ah_rococo() -> Location { + Parent.into() +} +pub(crate) fn bridged_roc_at_ah_westend() -> Location { + Location::new(2, [GlobalConsensus(Rococo)]) +} + +// wWND +pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { + Location::new(2, [GlobalConsensus(Westend)]) +} + +// USDT and wUSDT +pub(crate) fn usdt_at_ah_rococo() -> Location { + Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) +} +pub(crate) fn bridged_usdt_at_ah_westend() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Westend), Parachain(AssetHubWestend::para_id().into())], + [ + GlobalConsensus(Rococo), + Parachain(AssetHubRococo::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(USDT_ID.into()), + ], ) } -pub(crate) fn bridge_hub_westend_location() -> Location { +// wETH has same relative location on both Rococo and Westend AssetHubs +pub(crate) fn weth_at_asset_hubs() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Westend), Parachain(BridgeHubWestend::para_id().into())], + [ + GlobalConsensus(Ethereum { chain_id: snowbridge::CHAIN_ID }), + AccountKey20 { network: None, key: snowbridge::WETH }, + ], ) } -pub(crate) fn send_asset_from_asset_hub_rococo( +pub(crate) fn create_foreign_on_ah_rococo( + id: v3::Location, + sufficient: bool, + prefund_accounts: Vec<(AccountId, u128)>, +) { + let owner = AssetHubRococo::account_id_of(ALICE); + let min = ASSET_MIN_BALANCE; + AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); +} + +pub(crate) fn create_foreign_on_ah_westend(id: v3::Location, sufficient: bool) { + let owner = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); +} + +pub(crate) fn foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { + AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} +pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { + AssetHubWestend::execute_with(|| { + type Assets = 
::ForeignAssets; + >::balance(id, who) + }) +} + +// set up pool +pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v3::Location) { + let wnd: v3::Location = v3::Parent.into(); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + let owner = AssetHubWestendSender::get(); + let signed_owner = ::RuntimeOrigin::signed(owner.clone()); + + assert_ok!(::ForeignAssets::mint( + signed_owner.clone(), + foreign_asset.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + assert_ok!(::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(wnd), + Box::new(foreign_asset), + )); + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + assert_ok!(::AssetConversion::add_liquidity( + signed_owner.clone(), + Box::new(wnd), + Box::new(foreign_asset), + 1_000_000_000_000, + 2_000_000_000_000, + 1, + 1, + owner.into() + )); + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, + ] + ); + }); +} + +pub(crate) fn send_assets_from_asset_hub_rococo( destination: Location, - (id, amount): (Location, u128), + assets: Assets, + fee_idx: u32, ) -> DispatchResult { let signed_origin = ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); - let beneficiary: Location = AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); - let assets: Assets = (id, amount).into(); - let fee_asset_item = 0; - AssetHubRococo::execute_with(|| { ::PolkadotXcm::limited_reserve_transfer_assets( signed_origin, bx!(destination.into()), bx!(beneficiary.into()), bx!(assets.into()), - fee_asset_item, + fee_idx, WeightLimit::Unlimited, ) }) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 78788634e6ff45c10b5fbebc91da4843d8f595e3..652447fa56010918d8816187207bc86e2ae99ea5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -81,7 +81,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // send XCM from AssetHubRococo - fails - destination version not known assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), + send_assets_from_asset_hub_rococo( + destination.clone(), + (native_token.clone(), amount).into(), + 0 + ), DispatchError::Module(sp_runtime::ModuleError { index: 31, error: [1, 0, 0, 0], @@ -98,9 +102,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { newer_xcm_version, ); // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo( + assert_ok!(send_assets_from_asset_hub_rococo( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0, )); // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known @@ -115,9 +120,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo( + assert_ok!(send_assets_from_asset_hub_rococo( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0, )); 
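The call sites above show the new shape of the transfer helper: instead of a single `(id, amount)` pair, `send_assets_from_asset_hub_rococo` now takes a whole `Assets` collection plus the index of the entry that pays fees, so callers can pass several assets and pick which one covers execution. A minimal sketch of a one-asset call, assuming the surrounding emulated-test imports and an illustrative amount; the trait-qualified paths stripped by this rendering are not reproduced here:

    // send the relay native asset (Parent) from AH Rococo over the bridge,
    // paying fees with the same asset (index 0 of the collection)
    let destination = asset_hub_westend_location();
    let assets: Assets = (Parent, 1_000_000_000_000u128).into(); // example amount in plancks
    let fee_idx = 0;
    assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx));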
assert_bridge_hub_rococo_message_accepted(true); assert_bridge_hub_westend_message_received(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 8196b27cfe028087e1a6279d4bac1f6c0cf691e5..40a1968ec557bd99730fd27e151a4aa9f2f2cfd9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -13,12 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. use crate::imports::*; -use bridge_hub_rococo_runtime::{EthereumBeaconClient, EthereumInboundQueue, RuntimeOrigin}; use codec::{Decode, Encode}; use emulated_integration_tests_common::xcm_emulator::ConvertLocation; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; -use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ @@ -34,10 +32,10 @@ use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; -const CHAIN_ID: u64 = 11155111; +pub const CHAIN_ID: u64 = 11155111; const TREASURY_ACCOUNT: [u8; 32] = hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); -const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const INSUFFICIENT_XCM_FEE: u128 = 1000; const XCM_FEE: u128 = 4_000_000_000; @@ -64,7 +62,7 @@ pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult { ) .unwrap(); EthereumInboundQueue::submit( - RuntimeOrigin::signed(BridgeHubRococoSender::get()), + BridgeHubRococoRuntimeOrigin::signed(BridgeHubRococoSender::get()), fixture.message, ) } @@ -298,7 +296,7 @@ fn send_token_from_ethereum_to_penpal() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(), )], )); @@ -379,7 +377,7 @@ fn send_token_from_ethereum_to_penpal() { /// - returning the token to Ethereum #[test] fn send_weth_asset_from_asset_hub_to_ethereum() { - use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; + use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs index 8f51f5b180004d3f694cc68ba6d3c11ab46df95a..1fb03748d926c9a422ede319977c5e39c0f24b16 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs @@ -13,8 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::tests::*; -use bridge_hub_rococo_runtime::xcm_config::XcmConfig; +use crate::imports::*; #[test] fn teleport_to_other_system_parachains_works() { @@ -22,9 +21,9 @@ fn teleport_to_other_system_parachains_works() { let native_asset: Assets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( - BridgeHubRococo, // Origin - XcmConfig, // XCM configuration - vec![AssetHubRococo], // Destinations + BridgeHubRococo, // Origin + BridgeHubRococoXcmConfig, // XCM configuration + vec![AssetHubRococo], // Destinations (native_asset, amount) ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 6aebf8862d62e794a24e6926ff487d1956ed792b..6b83479eaf89a5677a77816b748948c8ba93fa53 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,26 +11,26 @@ publish = false workspace = true [dependencies] +hex-literal = { workspace = true, default-features = true } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +pallet-bridge-messages = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-westend-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-westend", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } +cumulus-pallet-xcmp-queue = { workspace = true } 
+emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 36b846e103131882e36b899bdb323d9b969cddde..3b0fcea57a26f3e06080c28e9d6495f1b71e3680 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -36,20 +36,25 @@ mod imports { xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ - genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, + AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_westend_emulated_chain::{ genesis::ED as BRIDGE_HUB_WESTEND_ED, - BridgeHubWestendParaPallet as BridgeHubWestendPallet, + BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendXcmConfig, + }, + penpal_emulated_chain::{ + penpal_runtime::xcm_config::UniversalLocation as PenpalUniversalLocation, + PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, - penpal_emulated_chain::{PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet}, westend_emulated_chain::WestendRelayPallet as WestendPallet, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 597e77d9049cf030fc2b0f0d8e986da53f1f08e2..0c0b04cd45a91c42592052799ba9580bd6e4219a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -14,165 +14,132 @@ // limitations under the License. 
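The hunk that follows extracts the shared bridge plumbing into a closure-taking helper: it funds the sending Asset Hub's sovereign account on its Bridge Hub, pins the XCM version on each hop, runs the caller-supplied transfer, then asserts that the local Bridge Hub accepted and the remote Bridge Hub received the message. A sketch of the intended signature and a typical call, assuming an `FnOnce()` bound on the closure (the generic parameter list is stripped in this rendering) and an illustrative transfer amount:

    fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
        // bridge transport fees are paid by AH Westend's sovereign account on BH Westend
        BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128);
        // make every hop speak the same XCM version
        // (the full helper also pins Penpal's version toward its local Asset Hub)
        AssetHubWestend::force_xcm_version(asset_hub_rococo_location(), XCM_VERSION);
        BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION);
        // the test-specific transfer
        send_fn();
        // drive and verify the intermediary bridge-hub hops
        assert_bridge_hub_westend_message_accepted(true);
        assert_bridge_hub_rococo_message_received();
    }

    // typical use: the closure only builds and dispatches the actual transfer
    send_assets_over_bridge(|| {
        let assets: Assets = (wnd_at_ah_westend(), 1_000_000_000_000u128).into();
        assert_ok!(send_assets_from_asset_hub_westend(asset_hub_rococo_location(), assets, 0));
    });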
use crate::tests::*; -fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: Location, amount: u128) { - let destination = asset_hub_rococo_location(); - +fn send_assets_over_bridge(send_fn: F) { // fund the AHW's SA on BHW for paying bridge transport fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions - AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); + let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + PenpalB::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubWestend::force_xcm_version(asset_hub_rococo_location(), XCM_VERSION); BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); // send message over bridge - assert_ok!(send_asset_from_asset_hub_westend(destination, (id, amount))); + send_fn(); + + // process and verify intermediary hops assert_bridge_hub_westend_message_accepted(true); assert_bridge_hub_rococo_message_received(); } -fn send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( - id: Location, - transfer_amount: u128, -) { - let destination = asset_hub_rococo_location(); - let local_asset_hub: Location = PenpalB::sibling_location_of(AssetHubWestend::para_id()); - let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalB::para_id()), - ); - let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, - AssetHubRococo::para_id(), - ); - - // fund the AHW's SA on BHW for paying bridge transport fees - BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); +fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr( + sender: &AccountId, + amount: u128, +) -> (Location, v3::Location) { + let wnd_at_westend_parachains = wnd_at_ah_westend(); + let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo, true); - // set XCM versions - PenpalB::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); - AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); - BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); - - // send message over bridge - assert_ok!(PenpalB::execute_with(|| { - let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); - let beneficiary: Location = - AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); - let assets: Assets = (id.clone(), transfer_amount).into(); - let fees_id: AssetId = id.into(); - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary, - }]); + let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount * 2)]); + // fund Penpal's sender account + PenpalB::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + wnd_at_westend_parachains.clone(), + sender.clone(), + amount * 2, + ); + (wnd_at_westend_parachains, wnd_at_asset_hub_rococo) +} - ::PolkadotXcm::transfer_assets_using_type_and_then( - signed_origin, - bx!(destination.into()), - bx!(assets.into()), - bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), - bx!(fees_id.into()), - 
bx!(TransferType::RemoteReserve(local_asset_hub.into())), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - AssetHubWestend, - vec![ - // Amount to reserve transfer is withdrawn from Penpal's sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahw.clone().into(), - amount: *amount == transfer_amount, - }, - // Amount deposited in AHR's sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == sov_ahr_on_ahw.clone().into(), - }, - RuntimeEvent::XcmpQueue( - cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } - ) => {}, - ] +fn send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + destination: Location, + assets: (Assets, TransferType), + fees: (AssetId, TransferType), + custom_xcm_on_dest: Xcm<()>, +) { + send_assets_over_bridge(|| { + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), ); + let sov_ahr_on_ahw = + AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + Rococo, + AssetHubRococo::para_id(), + ); + + // send message over bridge + assert_ok!(PenpalB::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); + ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin, + bx!(destination.into()), + bx!(assets.0.into()), + bx!(assets.1), + bx!(fees.0.into()), + bx!(fees.1), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify intermediary AH Westend hop + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, .. } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + }, + // Amount deposited in AHR's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahr_on_ahw.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); }); - assert_bridge_hub_westend_message_accepted(true); - assert_bridge_hub_rococo_message_received(); } #[test] +/// Test transfer of WND from AssetHub Westend to AssetHub Rococo. 
fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { - let wnd_at_asset_hub_westend: Location = Parent.into(); - let wnd_at_asset_hub_rococo = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo, - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_WESTEND_ED * 1_000; + let sender = AssetHubWestendSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let wnd_at_asset_hub_westend = wnd_at_ah_westend(); + let bridged_wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); + create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + + set_up_pool_with_roc_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( Rococo, AssetHubRococo::para_id(), ); - - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // setup a pool to pay xcm fees with `wnd_at_asset_hub_rococo` tokens - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - wnd_at_asset_hub_rococo.into(), - AssetHubRococoSender::get().into(), - 3_000_000_000_000, - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(wnd_at_asset_hub_rococo), - )); - - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(wnd_at_asset_hub_rococo), - 1_000_000_000_000, - 2_000_000_000_000, - 1, - 1, - AssetHubRococoSender::get().into() - )); - - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, - ] - ); - }); - let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; - let sender_wnds_before = - ::account_data_of(AssetHubWestendSender::get()).free; - let receiver_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + let sender_wnds_before = ::account_data_of(sender.clone()).free; + let receiver_wnds_before = + foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); + + // send WNDs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_rococo_location(); + let assets: Assets = (wnd_at_asset_hub_westend, amount).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_westend(destination, assets, fee_idx)); }); - let amount = ASSET_HUB_WESTEND_ED * 1_000; - send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + // verify expected events on final destination AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -180,8 +147,8 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { vec![ // issue WNDs on AHR RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == wnd_at_asset_hub_rococo, - owner: *owner == AssetHubRococoReceiver::get(), + asset_id: *asset_id == bridged_wnd_at_asset_hub_rococo, + owner: *owner == receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -191,12 +158,9 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { ); }); - let sender_wnds_after = - ::account_data_of(AssetHubWestendSender::get()).free; - let receiver_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) - }); + let sender_wnds_after = ::account_data_of(sender).free; + let receiver_wnds_after = + foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -209,18 +173,28 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { } #[test] -fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { +/// Send bridged assets "back" from AssetHub Rococo to AssetHub Westend. +/// +/// This mix of assets should cover the whole range: +/// - bridged native assets: ROC, +/// - bridged trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over +/// bridge), +/// - bridged foreign asset / double-bridged asset (other bridge / Snowfork): wETH (bridged from +/// Ethereum to Rococo over Snowbridge, then bridged over to Westend through this bridge). +fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { let prefund_amount = 10_000_000_000_000u128; - let roc_at_asset_hub_westend = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend, - owner, - true, - ASSET_MIN_BALANCE, - vec![(AssetHubWestendSender::get(), prefund_amount)], - ); + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + let sender = AssetHubWestendSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); + let bridged_roc_at_asset_hub_westend_v3 = + bridged_roc_at_asset_hub_westend.clone().try_into().unwrap(); + let prefund_accounts = vec![(sender.clone(), prefund_amount)]; + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, true, prefund_accounts); + + //////////////////////////////////////////////////////////// + // Let's first send back just some ROCs as a simple example + //////////////////////////////////////////////////////////// // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( @@ -232,19 +206,20 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); - let sender_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) - }); + + let sender_rocs_before = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); assert_eq!(sender_rocs_before, prefund_amount); - let receiver_rocs_before = - ::account_data_of(AssetHubRococoReceiver::get()).free; + let receiver_rocs_before = ::account_data_of(receiver.clone()).free; + + // send back ROCs, use them for fees + send_assets_over_bridge(|| { + let 
destination = asset_hub_rococo_location(); + let assets: Assets = (bridged_roc_at_asset_hub_westend, amount_to_send).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_westend(destination, assets, fee_idx)); + }); - let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; - send_asset_from_asset_hub_westend_to_asset_hub_rococo( - roc_at_asset_hub_westend.try_into().unwrap(), - amount_to_send, - ); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -259,7 +234,7 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { }, // ROCs deposited to beneficiary RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == AssetHubRococoReceiver::get(), + who: *who == receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -269,12 +244,9 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { ); }); - let sender_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) - }); - let receiver_rocs_after = - ::account_data_of(AssetHubRococoReceiver::get()).free; + let sender_rocs_after = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); + let receiver_rocs_after = ::account_data_of(receiver.clone()).free; let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; @@ -284,59 +256,141 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { assert!(receiver_rocs_after > receiver_rocs_before); // Reserve balance is reduced by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); + + ////////////////////////////////////////////////////////////////// + // Now let's send back over USDTs + wETH (and pay fees with USDT) + ////////////////////////////////////////////////////////////////// + + // wETH has same relative location on both Rococo and Westend AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + + // set up destination chain AH Rococo: + // create a ROC/USDT pool to be able to pay fees with USDT (USDT created in genesis) + set_up_pool_with_roc_on_ah_rococo(usdt_at_ah_rococo().try_into().unwrap(), false); + // create wETH on Rococo (IRL it's already created by Snowbridge) + create_foreign_on_ah_rococo(bridged_weth_at_ah, true); + // prefund AHW's sovereign account on AHR to be able to withdraw USDT and wETH from reserves + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::mint_asset( + ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), + USDT_ID, + sov_ahw_on_ahr.clone(), + amount_to_send * 2, + ); + AssetHubRococo::mint_foreign_asset( + ::RuntimeOrigin::signed(AssetHubRococo::account_id_of(ALICE)), + bridged_weth_at_ah, + sov_ahw_on_ahr, + amount_to_send * 2, + ); + + // set up source chain AH Westend: + // create wETH and USDT foreign assets on Westend and prefund sender's account + let prefund_accounts = vec![(sender.clone(), amount_to_send * 2)]; + create_foreign_on_ah_westend(bridged_weth_at_ah, true, prefund_accounts.clone()); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true, prefund_accounts); + + // check balances before + let receiver_usdts_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, 
&receiver) + }); + let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + + let usdt_id: AssetId = Location::try_from(bridged_usdt_at_asset_hub_westend).unwrap().into(); + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_id.clone(), amount_to_send).into(), + (Location::try_from(bridged_weth_at_ah).unwrap(), amount_to_send).into(), + ] + .into(); + // use USDT for fees + let fee = usdt_id; + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + }]); + assert_ok!(AssetHubWestend::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_rococo_location().into()), + bx!(assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); + AssetHubRococo::execute_with(|| { + AssetHubRococo::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, &receiver) + }); + let receiver_weth_after = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount_to_send` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount_to_send); + // Receiver's wETH balance is increased by `amount_to_send` + assert_eq!(receiver_weth_after, receiver_weth_before + amount_to_send); } #[test] fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { - let wnd_at_westend_parachains: Location = Parent.into(); - let wnd_at_asset_hub_rococo = Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo.clone().try_into().unwrap(), - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + let sender = PenpalBSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let (wnd_at_westend_parachains, wnd_at_asset_hub_rococo) = + set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( Rococo, AssetHubRococo::para_id(), ); - - let amount = ASSET_HUB_WESTEND_ED * 10_000_000; - let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); - let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); - // fund Penpal's sovereign account on AssetHub - AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount * 2)]); - // fund Penpal's sender account - PenpalB::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - wnd_at_westend_parachains.clone(), - PenpalBSender::get(), - amount * 2, - ); - let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; let sender_wnds_before = PenpalB::execute_with(|| { type 
ForeignAssets = ::ForeignAssets; - >::balance( - wnd_at_westend_parachains.clone(), - &PenpalBSender::get(), - ) + >::balance(wnd_at_westend_parachains.clone(), &sender) }); - let receiver_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - wnd_at_asset_hub_rococo.clone().try_into().unwrap(), - &AssetHubRococoReceiver::get(), - ) - }); - send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( - wnd_at_westend_parachains.clone(), - amount, - ); + let receiver_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &receiver); + + // Send WNDs over bridge + { + let destination = asset_hub_rococo_location(); + let assets: Assets = (wnd_at_westend_parachains.clone(), amount).into(); + let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into()); + let fees_id: AssetId = wnd_at_westend_parachains.clone().into(); + let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into()); + let beneficiary: Location = + AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); + send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + destination, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + custom_xcm_on_dest, + ); + } + // process AHR incoming message and check events AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -345,7 +399,7 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() // issue WNDs on AHR RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { asset_id: *asset_id == wnd_at_westend_parachains.clone().try_into().unwrap(), - owner: *owner == AssetHubRococoReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -357,15 +411,9 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() let sender_wnds_after = PenpalB::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(wnd_at_westend_parachains, &PenpalBSender::get()) - }); - let receiver_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - wnd_at_asset_hub_rococo.try_into().unwrap(), - &AssetHubRococoReceiver::get(), - ) + >::balance(wnd_at_westend_parachains, &sender) }); + let receiver_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &receiver); let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw.clone()).free; @@ -377,3 +425,121 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before); assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount); } + +#[test] +fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { + let roc_at_westend_parachains = bridged_roc_at_ah_westend(); + let roc_at_westend_parachains_v3 = roc_at_westend_parachains.clone().try_into().unwrap(); + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + let sender = PenpalBSender::get(); + let receiver = AssetHubRococoReceiver::get(); + + // set up WNDs for transfer + let (wnd_at_westend_parachains, _) = + set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); + + // set up ROCs for transfer + let penpal_location = 
AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location); + let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; + create_foreign_on_ah_westend(roc_at_westend_parachains_v3, true, prefund_accounts); + let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE); + PenpalB::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + asset_owner.clone(), + true, + ASSET_MIN_BALANCE, + vec![(sender.clone(), amount * 2)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]); + + // balances before + let sender_rocs_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone().into(), &sender) + }); + let receiver_rocs_before = ::account_data_of(receiver.clone()).free; + + // send ROCs over the bridge, WNDs only used to pay fees on local AH, pay with ROC on remote AH + { + let final_destination = asset_hub_rococo_location(); + let intermediary_hop = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let context = PenpalB::execute_with(|| PenpalUniversalLocation::get()); + + // what happens at final destination + let beneficiary = AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + // use ROC as fees on the final destination (AHW) + let remote_fees: Asset = (roc_at_westend_parachains.clone(), amount).into(); + let remote_fees = remote_fees.reanchored(&final_destination, &context).unwrap(); + // buy execution using ROCs, then deposit all remaining ROCs + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: WeightLimit::Unlimited }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary }, + ]); + + // what happens at intermediary hop + // reanchor final dest (Asset Hub Rococo) to the view of hop (Asset Hub Westend) + let mut final_destination = final_destination.clone(); + final_destination.reanchor(&intermediary_hop, &context).unwrap(); + // reanchor ROCs to the view of hop (Asset Hub Westend) + let asset: Asset = (roc_at_westend_parachains.clone(), amount).into(); + let asset = asset.reanchored(&intermediary_hop, &context).unwrap(); + // on Asset Hub Westend, forward a request to withdraw ROCs from reserve on Asset Hub Rococo + let xcm_on_hop = Xcm::<()>(vec![InitiateReserveWithdraw { + assets: Definite(asset.into()), // ROCs + reserve: final_destination, // AHR + xcm: xcm_on_final_dest, // XCM to execute on AHR + }]); + // assets to send from Penpal and how they reach the intermediary hop + let assets: Assets = vec![ + (roc_at_westend_parachains.clone(), amount).into(), + (wnd_at_westend_parachains.clone(), amount).into(), + ] + .into(); + let asset_transfer_type = TransferType::DestinationReserve; + let fees_id: AssetId = wnd_at_westend_parachains.into(); + let fees_transfer_type = TransferType::DestinationReserve; + + // initiate the transfer + send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + intermediary_hop, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + xcm_on_hop, + ); + } + + // process AHR incoming message and check events + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs 
on AHR + RuntimeEvent::Balances(pallet_balances::Event::Issued { .. }) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.into(), &sender) + }); + let receiver_rocs_after = ::account_data_of(receiver).free; + + // Sender's balance is reduced by sent "amount" + assert_eq!(sender_rocs_after, sender_rocs_before - amount); + // Receiver's balance is increased by no more than "amount" + assert!(receiver_rocs_after > receiver_rocs_before); + assert!(receiver_rocs_after <= receiver_rocs_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index b781d6e987ca1fc7f2a64710263f432c1cc8b3c6..92e864229a9cd605fbe0f8ca843deef6d74d30c2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -19,40 +19,161 @@ mod asset_transfers; mod send_xcm; mod teleport; +mod snowbridge { + pub const CHAIN_ID: u64 = 11155111; + pub const WETH: [u8; 20] = hex_literal::hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +} + pub(crate) fn asset_hub_rococo_location() -> Location { + Location::new(2, [GlobalConsensus(Rococo), Parachain(AssetHubRococo::para_id().into())]) +} + +pub(crate) fn bridge_hub_rococo_location() -> Location { + Location::new(2, [GlobalConsensus(Rococo), Parachain(BridgeHubRococo::para_id().into())]) +} + +// WND and wWND +pub(crate) fn wnd_at_ah_westend() -> Location { + Parent.into() +} +pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { + Location::new(2, [GlobalConsensus(Westend)]) +} + +// wROC +pub(crate) fn bridged_roc_at_ah_westend() -> Location { + Location::new(2, [GlobalConsensus(Rococo)]) +} + +// USDT and wUSDT +pub(crate) fn usdt_at_ah_rococo() -> Location { + Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) +} +pub(crate) fn bridged_usdt_at_ah_westend() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Rococo), Parachain(AssetHubRococo::para_id().into())], + [ + GlobalConsensus(Rococo), + Parachain(AssetHubRococo::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(USDT_ID.into()), + ], ) } -pub(crate) fn bridge_hub_rococo_location() -> Location { +// wETH has same relative location on both Rococo and Westend AssetHubs +pub(crate) fn weth_at_asset_hubs() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Rococo), Parachain(BridgeHubRococo::para_id().into())], + [ + GlobalConsensus(Ethereum { chain_id: snowbridge::CHAIN_ID }), + AccountKey20 { network: None, key: snowbridge::WETH }, + ], ) } -pub(crate) fn send_asset_from_asset_hub_westend( +pub(crate) fn create_foreign_on_ah_rococo(id: v3::Location, sufficient: bool) { + let owner = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); +} + +pub(crate) fn create_foreign_on_ah_westend( + id: v3::Location, + sufficient: bool, + prefund_accounts: Vec<(AccountId, u128)>, +) { + let owner = AssetHubWestend::account_id_of(ALICE); + let min = ASSET_MIN_BALANCE; + AssetHubWestend::force_create_foreign_asset(id, owner, 
sufficient, min, prefund_accounts); +} + +pub(crate) fn foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { + AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} +pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { + AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} + +// set up pool +pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v3::Location, is_foreign: bool) { + let roc: v3::Location = v3::Parent.into(); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + let owner = AssetHubRococoSender::get(); + let signed_owner = ::RuntimeOrigin::signed(owner.clone()); + + if is_foreign { + assert_ok!(::ForeignAssets::mint( + signed_owner.clone(), + asset.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } else { + let asset_id = match asset.interior.split_last() { + (_, Some(v3::Junction::GeneralIndex(id))) => id as u32, + _ => unreachable!(), + }; + assert_ok!(::Assets::mint( + signed_owner.clone(), + asset_id.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } + assert_ok!(::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(roc), + Box::new(asset), + )); + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + assert_ok!(::AssetConversion::add_liquidity( + signed_owner.clone(), + Box::new(roc), + Box::new(asset), + 1_000_000_000_000, + 2_000_000_000_000, + 1, + 1, + owner.into() + )); + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, + ] + ); + }); +} + +pub(crate) fn send_assets_from_asset_hub_westend( destination: Location, - (id, amount): (Location, u128), + assets: Assets, + fee_idx: u32, ) -> DispatchResult { let signed_origin = ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); - let beneficiary: Location = AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); - let assets: Assets = (id, amount).into(); - let fee_asset_item = 0; - AssetHubWestend::execute_with(|| { ::PolkadotXcm::limited_reserve_transfer_assets( signed_origin, bx!(destination.into()), bx!(beneficiary.into()), bx!(assets.into()), - fee_asset_item, + fee_idx, WeightLimit::Unlimited, ) }) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 8539df97be9331ea1126a56a70a47ac3a597ce5b..dee411bea8b7353f07b5542b1a81f08ed2dbd966 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -81,7 +81,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // send XCM from AssetHubWestend - fails - destination version not known assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), + send_assets_from_asset_hub_westend( + destination.clone(), + (native_token.clone(), amount).into(), + 0 + ), DispatchError::Module(sp_runtime::ModuleError { index: 31, error: [1, 0, 0, 0], @@ -98,9 +102,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { newer_xcm_version, ); 
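The `set_up_pool_with_roc_on_ah_rococo` helper added in the mod.rs hunk above boils down to three extrinsics on the destination Asset Hub: mint a starting balance of the bridged asset, create a ROC/asset pool, then add liquidity on both sides so XCM fees can be swapped into ROC via asset conversion. A condensed sketch of the foreign-asset branch; the wrapper name `sketch_pool_with_roc` is hypothetical, and the `<AssetHubRococo as …>` trait-qualified paths (stripped by this rendering) are assumed to follow the usual emulated-test convention:

    // hypothetical wrapper name, for illustration only
    fn sketch_pool_with_roc(asset: v3::Location) {
        let roc: v3::Location = v3::Parent.into();
        AssetHubRococo::execute_with(|| {
            let owner = AssetHubRococoSender::get();
            let origin = <AssetHubRococo as Chain>::RuntimeOrigin::signed(owner.clone());
            // 1. mint a starting balance of the bridged asset to the pool creator
            assert_ok!(<AssetHubRococo as AssetHubRococoPallet>::ForeignAssets::mint(
                origin.clone(), asset.into(), owner.clone().into(), 3_000_000_000_000,
            ));
            // 2. create the ROC / asset pool
            assert_ok!(<AssetHubRococo as AssetHubRococoPallet>::AssetConversion::create_pool(
                origin.clone(), Box::new(roc), Box::new(asset),
            ));
            // 3. fund both sides so fee swaps have liquidity to draw on
            assert_ok!(<AssetHubRococo as AssetHubRococoPallet>::AssetConversion::add_liquidity(
                origin, Box::new(roc), Box::new(asset),
                1_000_000_000_000, 2_000_000_000_000, 1, 1, owner.into(),
            ));
        });
    }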
// send XCM from AssetHubWestend - ok - assert_ok!(send_asset_from_asset_hub_westend( + assert_ok!(send_assets_from_asset_hub_westend( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0 )); // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known @@ -115,9 +120,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // send XCM from AssetHubWestend - ok - assert_ok!(send_asset_from_asset_hub_westend( + assert_ok!(send_assets_from_asset_hub_westend( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0 )); assert_bridge_hub_westend_message_accepted(true); assert_bridge_hub_rococo_message_received(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index c960233c08b73df30b2f873f2ef5333ea15bf428..64378a844f52a7cd14fecd2808fd1b5051d62528 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -13,8 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::tests::*; -use bridge_hub_westend_runtime::xcm_config::XcmConfig; +use crate::imports::*; #[test] fn teleport_to_other_system_parachains_works() { @@ -22,9 +21,9 @@ fn teleport_to_other_system_parachains_works() { let native_asset: Assets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( - BridgeHubWestend, // Origin - XcmConfig, // XCM configuration - vec![AssetHubWestend], // Destinations + BridgeHubWestend, // Origin + BridgeHubWestendXcmConfig, // XCM configuration + vec![AssetHubWestend], // Destinations (native_asset, amount) ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index 297f68de6218317017a36c9535ad581aa86e2883..3012e2b19f5326f9df8e61521a82afeaaaa73fae 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,33 +11,30 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-utility = { path = "../../../../../../../substrate/frame/utility", 
default-features = false } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-assets = { workspace = true } +pallet-treasury = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-utility = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } +polkadot-runtime-common = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +westend-runtime-constants = { workspace = true, default-features = true } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } -testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } -asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } -collectives-westend-runtime = { path = "../../../../../runtimes/collectives/collectives-westend" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +parachains-common = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +emulated-integration-tests-common = { workspace = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs index 97239330216ac8f66a7684811d1de30b13f56f7e..8af93a62f4a17e7f134b9d2f3d53ef7e267370de 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs @@ -19,9 +19,18 @@ pub use emulated_integration_tests_common::xcm_emulator::{ assert_expected_events, bx, Chain, RelayChain as Relay, TestExt, }; pub use westend_system_emulated_network::{ - asset_hub_westend_emulated_chain::AssetHubWestendParaPallet as AssetHubWestendPallet, - collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, - westend_emulated_chain::WestendRelayPallet as WestendPallet, + asset_hub_westend_emulated_chain::{ + asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId, + AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + collectives_westend_emulated_chain::{ + 
collectives_westend_runtime::fellowship as collectives_fellowship, + CollectivesWestendParaPallet as CollectivesWestendPallet, + }, + westend_emulated_chain::{ + westend_runtime::{governance as westend_governance, OriginCaller as WestendOriginCaller}, + WestendRelayPallet as WestendPallet, + }, AssetHubWestendPara as AssetHubWestend, CollectivesWestendPara as CollectivesWestend, WestendRelay as Westend, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index bde1220e2495bc544e507be1a8b40d77fcbde894..abd9a982c8ed5e5407776d3bb6d84686009f59f4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -14,14 +14,12 @@ // limitations under the License. use crate::*; -use asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId; use emulated_integration_tests_common::accounts::ALICE; use frame_support::{ assert_ok, dispatch::RawOrigin, instances::Instance1, sp_runtime::traits::Dispatchable, traits::fungible::Inspect, }; use polkadot_runtime_common::impls::VersionedLocatableAsset; -use westend_runtime::OriginCaller; use westend_runtime_constants::currency::UNITS; use xcm_executor::traits::ConvertLocation; @@ -65,7 +63,7 @@ fn fellowship_treasury_spend() { let treasury_location: Location = (Parent, PalletInstance(37)).into(); let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { - as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + as_origin: bx!(WestendOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), beneficiary: bx!(VersionedLocation::V4(treasury_location)), @@ -97,7 +95,7 @@ fn fellowship_treasury_spend() { // Fund Fellowship Treasury from Westend Treasury. let treasury_origin: RuntimeOrigin = - westend_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + westend_governance::pallet_custom_origins::Origin::Treasurer.into(); let fellowship_treasury_location: Location = Location::new(1, [Parachain(1001), PalletInstance(65)]); let asset_hub_location: Location = [Parachain(1000)].into(); @@ -170,8 +168,7 @@ fn fellowship_treasury_spend() { // Fund Alice account from Fellowship Treasury. 
let fellows_origin: RuntimeOrigin = - collectives_westend_runtime::fellowship::pallet_fellowship_origins::Origin::Fellows - .into(); + collectives_fellowship::pallet_fellowship_origins::Origin::Fellows.into(); let asset_hub_location: Location = (Parent, Parachain(1000)).into(); let native_asset = Location::parent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 29a939951e597a939de5ed9d244193ac8455e4d5..011be93ecac73f06a73616802d19a0e871cd8b30 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -8,25 +8,23 @@ description = "People Rococo runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-identity = { path = "../../../../../../../substrate/frame/identity", default-features = false } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +polkadot-runtime-common = { workspace = true, default-features = true } +rococo-runtime-constants = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../common" } -people-rococo-runtime = { path = "../../../../../runtimes/people/people-rococo" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } +asset-test-utils = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs index 38ff08b486d4703513ab74d681ae5f07107931ec..6c23c2f1f292e9f8235b1f862e2ac934877609d8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs @@ -37,9 +37,19 @@ mod imports { pub use parachains_common::Balance; pub use rococo_system_emulated_network::{ people_rococo_emulated_chain::{ - genesis::ED as PEOPLE_ROCOCO_ED, PeopleRococoParaPallet as PeopleRococoPallet, + genesis::ED as PEOPLE_ROCOCO_ED, + people_rococo_runtime::{people, xcm_config::XcmConfig as PeopleRococoXcmConfig}, + PeopleRococoParaPallet as PeopleRococoPallet, + }, + rococo_emulated_chain::{ + genesis::ED as ROCOCO_ED, + rococo_runtime::{ + xcm_config::XcmConfig as RococoXcmConfig, BasicDeposit, ByteDeposit, + MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as RococoOrigin, + SubAccountDeposit, + }, + RococoRelayPallet as RococoPallet, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, PeopleRococoPara as PeopleRococo, PeopleRococoParaReceiver as PeopleRococoReceiver, PeopleRococoParaSender as PeopleRococoSender, RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs index 3f1f8638d6fa1491288cd6bbd60f08c13af566dd..342a8f053f6076cdb62a7ff2dc6dfccd98c3b7bc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs @@ -42,14 +42,10 @@ use crate::imports::*; use frame_support::BoundedVec; use pallet_balances::Event as BalancesEvent; use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent}; -use people_rococo_runtime::people::{ +use people::{ BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, }; -use rococo_runtime::{ - BasicDeposit, ByteDeposit, MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as RococoOrigin, - SubAccountDeposit, -}; use rococo_runtime_constants::currency::*; use rococo_system_emulated_network::{ rococo_emulated_chain::RococoRelayPallet, RococoRelay, RococoRelaySender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 350d87d638ab25bd66a411a67fa3f109bdaffff9..4410d1bd40dcc1276bb5441962f855823861d1cd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -14,8 +14,6 @@ // limitations under the License. 
use crate::imports::*; -use people_rococo_runtime::xcm_config::XcmConfig as PeopleRococoXcmConfig; -use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index 6eab6f52aa72172ecc19fa891109fc9df859ec3c..f7e1cce85a2cf7895d178cb3f95a24070c790198 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -8,25 +8,23 @@ description = "People Westend runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-identity = { path = "../../../../../../../substrate/frame/identity", default-features = false } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +polkadot-runtime-common = { workspace = true, default-features = true } +westend-runtime-constants = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../common" } -people-westend-runtime = { path = "../../../../../runtimes/people/people-westend" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +asset-test-utils = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs index 77ac7cfc78c78c6f51ea174b2436d05ee612e0b7..ce1ed9751a2e68ab1cb087983dbb01e21d34bcc8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs @@ -35,10 +35,21 @@ mod 
imports { }; pub use parachains_common::Balance; pub use westend_system_emulated_network::{ + self, people_westend_emulated_chain::{ - genesis::ED as PEOPLE_WESTEND_ED, PeopleWestendParaPallet as PeopleWestendPallet, + genesis::ED as PEOPLE_WESTEND_ED, + people_westend_runtime::{people, xcm_config::XcmConfig as PeopleWestendXcmConfig}, + PeopleWestendParaPallet as PeopleWestendPallet, + }, + westend_emulated_chain::{ + genesis::ED as WESTEND_ED, + westend_runtime::{ + xcm_config::XcmConfig as WestendXcmConfig, BasicDeposit, ByteDeposit, + MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as WestendOrigin, + SubAccountDeposit, + }, + WestendRelayPallet as WestendPallet, }, - westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, PeopleWestendPara as PeopleWestend, PeopleWestendParaReceiver as PeopleWestendReceiver, PeopleWestendParaSender as PeopleWestendSender, WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs index 3ed8592918d65b81737a4e8206a3bc23b3684b5f..28d1be853204ea5e3943de7da8b14a7601006bd6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs @@ -42,14 +42,10 @@ use crate::imports::*; use frame_support::BoundedVec; use pallet_balances::Event as BalancesEvent; use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent}; -use people_westend_runtime::people::{ +use people::{ BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, }; -use westend_runtime::{ - BasicDeposit, ByteDeposit, MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as WestendOrigin, - SubAccountDeposit, -}; use westend_runtime_constants::currency::*; use westend_system_emulated_network::{ westend_emulated_chain::WestendRelayPallet, WestendRelay, WestendRelaySender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 8697477ba769329755b40a87132c62b213861cc4..6fd3cdeb61fbc3d3deb5445a7edf57a5008367c6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -14,8 +14,6 @@ // limitations under the License. 
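The teleport and reap-identity tests below now get every runtime type through these re-exports, which is why the crate's Cargo.toml above drops its direct `people-westend-runtime` and `westend-runtime` dependencies. Roughly, the alias resolves as follows (sketch only, mirroring the `imports` module above):

    // Sketch: `PeopleWestendXcmConfig` is the runtime's XcmConfig, re-exported through the
    // emulated network crate instead of being imported from the runtime crate directly.
    pub use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime::xcm_config::XcmConfig as PeopleWestendXcmConfig;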
use crate::imports::*; -use people_westend_runtime::xcm_config::XcmConfig as PeopleWestendXcmConfig; -use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 92e0a54631394154634900829c708431b2931b67..61cbe7850090195af08bccfd3f0b172dcbb85b60 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -10,19 +10,19 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 01ee12bf4e719a9fde63fa4adb43bc6ff4e5c3ea..7369c3a2c157624b7af48f860c73ba61716a1038 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -10,16 +10,16 @@ description = "Pallet to store the parachain ID" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 
f51946e9ebd5d2c5fd471683a217fe2dbe8f2f61..f74328207b84387f969bcf94d81d6bef705f877a 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -10,18 +10,18 @@ description = "Ping Pallet for Cumulus XCM/UMP testing." workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } -cumulus-pallet-xcm = { path = "../../../pallets/xcm", default-features = false } +cumulus-primitives-core = { workspace = true } +cumulus-pallet-xcm = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index a880730ddacfdde9fcbda95fefb457af031b3da7..9ef0aa0072c96a3077c2a9d1c8b3793521938ff5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -10,96 +10,97 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } -pallet-assets = { 
path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { 
optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-assets-freezer = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nft-fractionalization = { workspace = true } +pallet-nfts = { workspace = true } +pallet-nfts-runtime-api = { workspace = true } +pallet-proxy = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-uniques = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +sp-weights = { workspace = true } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +rococo-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = 
"../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } -assets-common = { path = "../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } +assets-common = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } +pallet-xcm-bridge-hub-router = { workspace = true } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +snowbridge-router-primitives = { workspace = true } [dev-dependencies] -asset-test-utils = { path = "../test-utils" } +asset-test-utils = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -116,6 +117,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets-freezer/runtime-benchmarks", 
"pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", @@ -137,7 +139,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -151,6 +153,7 @@ try-runtime = [ "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", + "pallet-assets-freezer/try-runtime", "pallet-assets/try-runtime", "pallet-aura/try-runtime", "pallet-authorship/try-runtime", @@ -200,6 +203,7 @@ std = [ "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", + "pallet-assets-freezer/std", "pallet-assets/std", "pallet-aura/std", "pallet-authorship/std", @@ -246,7 +250,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index d75b07bd2b9fce6cd52c0aa3d93e3aa3d0dff39f..e8772c0b4830363723b96424d5507c461715354e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -100,7 +100,7 @@ use xcm::{ latest::prelude::{AssetId, BodyId}, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -118,7 +118,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -257,7 +257,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; - type Freezer = (); + type Freezer = AssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_local::WeightInfo; type CallbackHandle = (); @@ -267,6 +267,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `Assets` pallet +pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! 
{ pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); @@ -295,7 +302,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ConstU128<0>; type ApprovalDeposit = ApprovalDeposit; type StringLimit = ConstU32<50>; - type Freezer = (); + type Freezer = PoolAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_pool::WeightInfo; type CallbackHandle = (); @@ -303,6 +310,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `PoolAssets` pallet +pub type PoolAssetsFreezerInstance = pallet_assets_freezer::Instance3; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + /// Union fungibles implementation for `Assets` and `ForeignAssets`. pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, @@ -411,7 +425,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; type ApprovalDeposit = ForeignAssetsApprovalDeposit; type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); + type Freezer = ForeignAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_foreign::WeightInfo; type CallbackHandle = (); @@ -421,6 +435,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; } +// Allow Freezes for the `ForeignAssets` pallet +pub type ForeignAssetsFreezerInstance = pallet_assets_freezer::Instance2; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. pub const DepositBase: Balance = deposit(1, 88); @@ -953,6 +974,9 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + AssetsFreezer: pallet_assets_freezer:: = 57, + ForeignAssetsFreezer: pallet_assets_freezer:: = 58, + PoolAssetsFreezer: pallet_assets_freezer:: = 59, // TODO: the pallet instance should be removed once all pools have migrated // to the new account IDs. @@ -1298,7 +1322,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1311,11 +1335,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1330,7 +1354,7 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1340,6 +1364,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 775bc3bdb80f54a8db97d1c1fdbf5a837fdb95b1..0a86037391b42d71340d8d0665a9210d8b9a0281 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -49,32 +49,32 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: // Measured: `154` - // Estimated: `1639` - // Minimum execution time: 7_853_000 picoseconds. - Weight::from_parts(8_443_000, 0) - .saturating_add(Weight::from_parts(0, 1639)) + // Estimated: `5487` + // Minimum execution time: 8_078_000 picoseconds. 
+ Weight::from_parts(8_455_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `1629` - // Minimum execution time: 4_333_000 picoseconds. - Weight::from_parts(4_501_000, 0) - .saturating_add(Weight::from_parts(0, 1629)) + // Estimated: `5487` + // Minimum execution time: 4_291_000 picoseconds. + Weight::from_parts(4_548_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) @@ -83,14 +83,12 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_167_000 picoseconds. - Weight::from_parts(10_667_000, 0) + // Minimum execution time: 9_959_000 picoseconds. + Weight::from_parts(10_372_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) @@ -100,7 +98,9 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -108,17 +108,17 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), 
added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn send_message() -> Weight { // Proof Size summary in bytes: // Measured: `448` // Estimated: `6388` - // Minimum execution time: 60_584_000 picoseconds. - Weight::from_parts(62_467_000, 0) + // Minimum execution time: 45_888_000 picoseconds. + Weight::from_parts(47_022_000, 0) .saturating_add(Weight::from_parts(0, 6388)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index cf5a3905e581641fb504d43318cc562c32bca8b8..03de2c971b7ff133223db0385919a90d0393fea3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -337,10 +337,11 @@ impl xcm_executor::Config for XcmConfig { type OriginConverter = XcmOriginToTransactDispatchOrigin; // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being - // held). Asset Hub may _act_ as a reserve location for ROC and assets created - // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). + // held). On Rococo Asset Hub, we allow Westend Asset Hub to act as reserve for any asset native + // to the Westend ecosystem. We also allow Ethereum contracts to act as reserves for the foreign + // assets identified by the same respective contracts locations. type IsReserve = ( - bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset, + bridging::to_westend::WestendAssetFromAssetHubWestend, bridging::to_ethereum::IsTrustedBridgedReserveLocationForForeignAsset, ); type IsTeleporter = TrustedTeleporters; @@ -568,13 +569,12 @@ pub mod bridging { ); pub const WestendNetwork: NetworkId = NetworkId::Westend; - pub AssetHubWestend: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get()), Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID)]); + pub WestendEcosystem: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); pub WndLocation: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); - - pub WndFromAssetHubWestend: (AssetFilter, Location) = ( - Wild(AllOf { fun: WildFungible, id: AssetId(WndLocation::get()) }), - AssetHubWestend::get() - ); + pub AssetHubWestend: Location = Location::new(2, [ + GlobalConsensus(WestendNetwork::get()), + Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID) + ]); /// Set up exporters configuration. /// `Option` represents static "base fee" which is used for total delivery fee calculation. @@ -607,17 +607,9 @@ pub mod bridging { } } - /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. 
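In `xcm_executor::Config`, `IsReserve` is a `ContainsPair<Asset, Location>` filter, so the swap above replaces the fixed WND-from-Asset-Hub case with a location-scoped rule. A rough sketch of the check the new `WestendAssetFromAssetHubWestend` alias performs (struct name and body are illustrative, not the actual `RemoteAssetFromLocation` implementation):

    // Sketch: trust Westend Asset Hub as reserve for any asset rooted in the Westend ecosystem.
    struct WestendAssetFromAssetHubWestendSketch;
    impl ContainsPair<Asset, Location> for WestendAssetFromAssetHubWestendSketch {
        fn contains(asset: &Asset, origin: &Location) -> bool {
            let AssetId(ref id) = asset.id;
            id.starts_with(&WestendEcosystem::get()) && *origin == AssetHubWestend::get()
        }
    }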
- pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive WND from AssetHubWestend - xcm_builder::Case, - // and nothing else - ), - >; + /// Allow any asset native to the Westend ecosystem if it comes from Westend Asset Hub. + pub type WestendAssetFromAssetHubWestend = + matching::RemoteAssetFromLocation, AssetHubWestend>; impl Contains for ToWestendXcmRouter { fn contains(call: &RuntimeCall) -> bool { @@ -672,7 +664,7 @@ pub mod bridging { } pub type IsTrustedBridgedReserveLocationForForeignAsset = - matching::IsForeignConcreteAsset>; + IsForeignConcreteAsset>; impl Contains<(Location, Junction)> for UniversalAliases { fn contains(alias: &(Location, Junction)) -> bool { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 953f6a8b4009a5ab86cc7d1177dfbb993d291c8d..7e618d950b19a86a46903d3b8351c1c1be542cb9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -10,95 +10,96 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-nft-fractionalization = { 
path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-assets-freezer = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nft-fractionalization = { workspace = true } +pallet-nfts = { workspace = true } +pallet-nfts-runtime-api = { workspace = true } +pallet-proxy = { workspace = true } +pallet-session = { workspace = true } +pallet-state-trie-migration = { workspace = true } 
+pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-uniques = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = 
"../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } -assets-common = { path = "../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } +assets-common = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } +pallet-xcm-bridge-hub-router = { workspace = true } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } [dev-dependencies] -asset-test-utils = { path = "../test-utils" } +asset-test-utils = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -115,6 +116,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets-freezer/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", @@ -136,7 +138,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -150,6 +152,7 @@ try-runtime = [ "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", + "pallet-assets-freezer/try-runtime", "pallet-assets/try-runtime", "pallet-aura/try-runtime", "pallet-authorship/try-runtime", @@ -200,6 +203,7 @@ std = [ "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", + "pallet-assets-freezer/std", 
"pallet-assets/std", "pallet-aura/std", "pallet-authorship/std", @@ -245,7 +249,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e9c2b10f719daaea30219146096da76d4147fdac..55c8a9f0b265daa8e9a4d3d70712d26bd8b9fc9c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -85,10 +85,10 @@ pub use sp_runtime::BuildStorage; use assets_common::{foreign_creators::ForeignCreators, matching::FromSiblingParachain}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; - -// We exclude `Assets` since it's the name of a pallet -use xcm::latest::prelude::AssetId; +use xcm::{ + latest::prelude::AssetId, + prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, +}; #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ @@ -96,7 +96,7 @@ use xcm::latest::prelude::{ NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -255,7 +255,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; - type Freezer = (); + type Freezer = AssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_local::WeightInfo; type CallbackHandle = (); @@ -265,6 +265,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `Assets` pallet +pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); @@ -292,7 +299,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ConstU128<0>; type ApprovalDeposit = ConstU128<0>; type StringLimit = ConstU32<50>; - type Freezer = (); + type Freezer = PoolAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_pool::WeightInfo; type CallbackHandle = (); @@ -300,6 +307,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `PoolAssets` pallet +pub type PoolAssetsFreezerInstance = pallet_assets_freezer::Instance3; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + /// Union fungibles implementation for `Assets` and `ForeignAssets`. 
pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, @@ -405,7 +419,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; type ApprovalDeposit = ForeignAssetsApprovalDeposit; type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); + type Freezer = ForeignAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_foreign::WeightInfo; type CallbackHandle = (); @@ -415,6 +429,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; } +// Allow Freezes for the `ForeignAssets` pallet +pub type ForeignAssetsFreezerInstance = pallet_assets_freezer::Instance2; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. pub const DepositBase: Balance = deposit(1, 88); @@ -943,6 +964,9 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + AssetsFreezer: pallet_assets_freezer:: = 57, + ForeignAssetsFreezer: pallet_assets_freezer:: = 58, + PoolAssetsFreezer: pallet_assets_freezer:: = 59, StateTrieMigration: pallet_state_trie_migration = 70, @@ -1326,7 +1350,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1339,11 +1363,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1358,7 +1382,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1368,6 +1392,18 @@ impl_runtime_apis! 
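
> Editor's note: with the `Freezer` hooks above now pointing at dedicated `pallet_assets_freezer` instances (one per `pallet_assets` instance, keyed by `RuntimeFreezeReason`), asset balances can carry freezes. The sketch below shows what that enables, written against the standard `fungibles` freeze traits from `frame_support`; it assumes the freezer instances expose those traits, and `freeze_and_report` is an illustrative helper, not code from this diff.

```rust
use frame_support::traits::fungibles::freeze::{Inspect as FreezeInspect, Mutate as FreezeMutate};
use sp_runtime::DispatchError;

/// Freeze `amount` of `asset` for `who` under freeze id `reason`, then report how much of
/// that asset is now frozen for `who` under that id.
fn freeze_and_report<AccountId, F>(
	asset: F::AssetId,
	reason: &F::Id,
	who: &AccountId,
	amount: F::Balance,
) -> Result<F::Balance, DispatchError>
where
	F: FreezeMutate<AccountId> + FreezeInspect<AccountId>,
	F::AssetId: Clone,
{
	// `set_freeze` replaces any existing freeze held under the same id for this account.
	F::set_freeze(asset.clone(), reason, who, amount)?;
	Ok(F::balance_frozen(asset, reason, who))
}
```

Assuming `pallet-assets-freezer` provides these trait implementations, the same helper works for any of the three instances wired in this diff (`AssetsFreezer`, `ForeignAssetsFreezer`, `PoolAssetsFreezer`).
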
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index 84d717b0283c764cac14cce63ca34f81c9f58e8c..21d15c75af553da60ecf0648186a26255bb72890 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -49,48 +49,46 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `1678` - // Minimum execution time: 8_095_000 picoseconds. - Weight::from_parts(8_393_000, 0) - .saturating_add(Weight::from_parts(0, 1678)) + // Measured: `226` + // Estimated: `5487` + // Minimum execution time: 8_363_000 picoseconds. 
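
> Editor's note: the `LocationToAccountApi` implementation added above surfaces the runtime's existing `xcm_config::LocationToAccountId` converter through a runtime API. At the trait level that converter is an `xcm_executor::traits::ConvertLocation`, which the sketch below exercises generically; the sibling-parachain location is only an example input, not something taken from this diff.

```rust
use xcm::latest::prelude::*;
use xcm_executor::traits::ConvertLocation;

/// Resolve the local account a remote `location` maps to, if the converter knows it.
fn sovereign_account_of<AccountId, C: ConvertLocation<AccountId>>(
	location: &Location,
) -> Option<AccountId> {
	C::convert_location(location)
}

/// Example: the account controlled by sibling parachain 1000, as seen from this chain.
fn sibling_para_account<AccountId, C: ConvertLocation<AccountId>>() -> Option<AccountId> {
	sovereign_account_of::<AccountId, C>(&Location::new(1, [Parachain(1000)]))
}
```

A node-side caller would reach the same logic through the new runtime API, passing a `VersionedLocation` to `convert_location` as wired above.
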
+ Weight::from_parts(8_620_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 3_417_000 picoseconds. - Weight::from_parts(3_583_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) + // Estimated: `5487` + // Minimum execution time: 3_436_000 picoseconds. + Weight::from_parts(3_586_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn report_bridge_status() -> Weight { // Proof Size summary in bytes: - // Measured: `117` + // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_280_000 picoseconds. - Weight::from_parts(10_703_000, 0) + // Minimum execution time: 9_706_000 picoseconds. + Weight::from_parts(10_139_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) @@ -100,7 +98,9 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -108,18 +108,18 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: 
`Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn send_message() -> Weight { // Proof Size summary in bytes: - // Measured: `487` - // Estimated: `6427` - // Minimum execution time: 63_624_000 picoseconds. - Weight::from_parts(66_071_000, 0) - .saturating_add(Weight::from_parts(0, 6427)) + // Measured: `520` + // Estimated: `6460` + // Minimum execution time: 46_250_000 picoseconds. + Weight::from_parts(47_801_000, 0) + .saturating_add(Weight::from_parts(0, 6460)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ff1fc99cba8a7f982ab7f2a58602a48ab3420701..fc2e68c599fd1415df04c6b89510503091ef6cb5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -357,9 +357,9 @@ impl xcm_executor::Config for XcmConfig { type OriginConverter = XcmOriginToTransactDispatchOrigin; // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being - // held). Asset Hub may _act_ as a reserve location for WND and assets created - // under `pallet-assets`. Users must use teleport where allowed (e.g. WND with the Relay Chain). - type IsReserve = (bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset,); + // held). On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset native + // to the Rococo or Ethereum ecosystems. + type IsReserve = (bridging::to_rococo::RococoOrEthereumAssetFromAssetHubRococo,); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -569,13 +569,14 @@ pub mod bridging { ); pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub AssetHubRococo: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID)]); + pub const EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub RococoEcosystem: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); pub RocLocation: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); - - pub RocFromAssetHubRococo: (AssetFilter, Location) = ( - Wild(AllOf { fun: WildFungible, id: AssetId(RocLocation::get()) }), - AssetHubRococo::get() - ); + pub EthereumEcosystem: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]); + pub AssetHubRococo: Location = Location::new(2, [ + GlobalConsensus(RococoNetwork::get()), + Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID) + ]); /// Set up exporters configuration. 
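
> Editor's note: the `IsReserve` filters introduced on both Asset Hubs in this diff are built from the new `matching::RemoteAssetFromLocation` (defined in the `assets/common/src/matching.rs` hunk further down): an asset is accepted only when the XCM origin is exactly the expected remote Asset Hub and the asset's id sits under one of the allowed remote ecosystems. Below is a self-contained toy of that containment logic, using stand-in types instead of the real XCM ones.

```rust
#[derive(Clone, PartialEq, Eq, Debug)]
struct ToyLocation(Vec<&'static str>);

struct ToyAsset {
	id: ToyLocation,
}

/// Accept `asset` only if it is native to an allowed ecosystem and sent by `expected_origin`.
fn remote_asset_from_location(
	allowed_ecosystem_prefixes: &[ToyLocation],
	expected_origin: &ToyLocation,
	asset: &ToyAsset,
	origin: &ToyLocation,
) -> bool {
	// Reject anything not sent by the expected remote Asset Hub.
	if origin != expected_origin {
		return false;
	}
	// Accept only assets whose id starts with one of the allowed remote ecosystem prefixes.
	allowed_ecosystem_prefixes
		.iter()
		.any(|prefix| asset.id.0.starts_with(&prefix.0))
}

fn main() {
	let rococo_ecosystem = ToyLocation(vec!["Rococo"]);
	let ethereum_ecosystem = ToyLocation(vec!["Ethereum"]);
	let asset_hub_rococo = ToyLocation(vec!["Rococo", "Parachain(1000)"]);
	let roc = ToyAsset { id: ToyLocation(vec!["Rococo"]) };

	// ROC arriving from Rococo Asset Hub is accepted as reserve-backed...
	assert!(remote_asset_from_location(
		&[rococo_ecosystem.clone(), ethereum_ecosystem.clone()],
		&asset_hub_rococo,
		&roc,
		&asset_hub_rococo
	));
	// ...while the same asset claimed by any other origin is rejected.
	assert!(!remote_asset_from_location(
		&[rococo_ecosystem, ethereum_ecosystem],
		&asset_hub_rococo,
		&roc,
		&ToyLocation(vec!["Westend", "Parachain(2000)"])
	));
}
```
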
/// `Option` represents static "base fee" which is used for total delivery fee calculation. @@ -608,17 +609,12 @@ pub mod bridging { } } - /// Reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive ROC from AssetHubRococo - xcm_builder::Case, - // and nothing else - ), - >; + /// Allow any asset native to the Rococo or Ethereum ecosystems if it comes from Rococo + /// Asset Hub. + pub type RococoOrEthereumAssetFromAssetHubRococo = matching::RemoteAssetFromLocation< + (StartsWith, StartsWith), + AssetHubRococo, + >; impl Contains for ToRococoXcmRouter { fn contains(call: &RuntimeCall) -> bool { diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 4664e0cb9a7f817ed419936dfa9e5d0ac52f599e..94612506f510e3677db444c61c05db46f84cdc0e 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -10,30 +10,30 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -impl-trait-for-tuples = "0.2.2" +impl-trait-for-tuples = { workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -parachains-common = { path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 
3aad88e177caad1095a3dbe21dd3a3308b103680..f356cb541315fa814c3df5d7ad69342c132661d3 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -14,7 +14,10 @@ // limitations under the License. use cumulus_primitives_core::ParaId; -use frame_support::{pallet_prelude::Get, traits::ContainsPair}; +use frame_support::{ + pallet_prelude::Get, + traits::{Contains, ContainsPair}, +}; use xcm::prelude::*; use xcm_builder::ensure_is_remote; @@ -94,36 +97,33 @@ impl< } } -/// Adapter verifies if it is allowed to receive `Asset` from `Location`. -/// -/// Note: `Location` has to be from a different global consensus. -pub struct IsTrustedBridgedReserveLocationForConcreteAsset( - sp_std::marker::PhantomData<(UniversalLocation, Reserves)>, +/// Accept an asset if it is native to `AssetsAllowedNetworks` and it is coming from +/// `OriginLocation`. +pub struct RemoteAssetFromLocation( + sp_std::marker::PhantomData<(AssetsAllowedNetworks, OriginLocation)>, ); -impl, Reserves: ContainsPair> - ContainsPair - for IsTrustedBridgedReserveLocationForConcreteAsset +impl, OriginLocation: Get> + ContainsPair for RemoteAssetFromLocation { fn contains(asset: &Asset, origin: &Location) -> bool { - let universal_source = UniversalLocation::get(); - log::trace!( - target: "xcm::contains", - "IsTrustedBridgedReserveLocationForConcreteAsset asset: {:?}, origin: {:?}, universal_source: {:?}", - asset, origin, universal_source - ); - - // check remote origin - if ensure_is_remote(universal_source.clone(), origin.clone()).is_err() { + let expected_origin = OriginLocation::get(); + // ensure `origin` is expected `OriginLocation` + if !expected_origin.eq(origin) { log::trace!( target: "xcm::contains", - "IsTrustedBridgedReserveLocationForConcreteAsset origin: {:?} is not remote to the universal_source: {:?}", - origin, universal_source + "RemoteAssetFromLocation asset: {:?}, origin: {:?} is not from expected {:?}", + asset, origin, expected_origin, ); return false + } else { + log::trace!( + target: "xcm::contains", + "RemoteAssetFromLocation asset: {asset:?}, origin: {origin:?}", + ); } - // check asset according to the configured reserve locations - Reserves::contains(asset, origin) + // ensure `asset` is from remote consensus listed in `AssetsAllowedNetworks` + AssetsAllowedNetworks::contains(&asset.id.0) } } diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index af5b4a64680724abcbf0d659ba8a7b2b45cd0e80..a7aad361e84e1b4c8555f57be2dafacab760c15d 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -10,42 +10,42 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-session = { path = 
"../../../../../substrate/frame/session", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-session = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +pallet-collator-selection = { workspace = true } +parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } +parachain-info = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } +pallet-xcm-bridge-hub-router = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" +hex-literal = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 253a21f5d0baba1218de3469766a7eb065cb8f3d..98737298468210671a6140fb119f504bd8f7f9e4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -10,125 +10,124 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = 
"3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -hex-literal = { version = "0.4.1" } +], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } +], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", 
default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +rococo-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ +cumulus-pallet-aura-ext = { workspace = true } 
+cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = [ "bridging", -] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-polkadot = { path = "../../../../../bridges/chains/chain-bridge-hub-polkadot", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-bulletin = { path = "../../../../../bridges/chains/chain-polkadot-bulletin", default-features = false } -bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } -pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } 
-bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-polkadot = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-bulletin = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-rococo = { workspace = true } +bp-westend = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } +pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } # Ethereum Bridge (Snowbridge) -snowbridge-beacon-primitives = { path = "../../../../../bridges/snowbridge/primitives/beacon", default-features = false } -snowbridge-pallet-system = { path = "../../../../../bridges/snowbridge/pallets/system", default-features = false } -snowbridge-system-runtime-api = { path = "../../../../../bridges/snowbridge/pallets/system/runtime-api", default-features = false } -snowbridge-core = { path = "../../../../../bridges/snowbridge/primitives/core", default-features = false } -snowbridge-pallet-ethereum-client = { path = "../../../../../bridges/snowbridge/pallets/ethereum-client", default-features = false } -snowbridge-pallet-inbound-queue = { path = "../../../../../bridges/snowbridge/pallets/inbound-queue", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../../../../bridges/snowbridge/pallets/outbound-queue", default-features = false } -snowbridge-outbound-queue-runtime-api = { path = "../../../../../bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } -snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } -snowbridge-runtime-common = { path = "../../../../../bridges/snowbridge/runtime/runtime-common", default-features = false } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-pallet-ethereum-client = { workspace = true } +snowbridge-pallet-inbound-queue = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-outbound-queue-runtime-api = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-runtime-common = { workspace = true } -bridge-hub-common = { path = "../common", default-features = false } +bridge-hub-common = { workspace = true } [dev-dependencies] -static_assertions = "1.1" -bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = [ +static_assertions = { workspace = true, default-features = true } +bridge-hub-test-utils = { workspace = true, default-features = true } +bridge-runtime-common = { features = [ "integrity-test", -] } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -snowbridge-runtime-test-common = { path = "../../../../../bridges/snowbridge/runtime/test-common" } +], workspace = true, default-features = true } +sp-keyring 
= { workspace = true, default-features = true } +snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] default = ["std"] @@ -218,10 +217,9 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -264,7 +262,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 5551b05e202547c99501b279e8839611efcc7f66..779cc537ee96dcc2fadc38452dfcd01c4c320789 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -21,14 +21,9 @@ //! For example, the messaging pallet needs to know the sending and receiving chains, but the //! GRANDPA tracking pallet only needs to be aware of one chain. -use super::{ - weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent, RuntimeOrigin, -}; +use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; use bp_parachains::SingleParaStoredHeaderDataBuilder; -use bp_runtime::UnderlyingChainProvider; -use bridge_runtime_common::messages::ThisChainWithMessages; use frame_support::{parameter_types, traits::ConstU32}; -use sp_runtime::RuntimeDebug; parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; @@ -103,15 +98,3 @@ impl pallet_bridge_grandpa::Config for Runt // weights are also the same for both bridges. type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 94b936889b77c4460f9921956d6f7abef1ecb52c..39ea636925528a402432168fdf1a2562c339c045 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -20,23 +20,20 @@ //! are reusing Polkadot Bulletin chain primitives everywhere here. 
use crate::{ - bridge_common_config::BridgeHubRococo, weights, xcm_config::UniversalLocation, AccountId, - BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, - XcmOverRococoBulletin, XcmRouter, + weights, xcm_config::UniversalLocation, BridgeRococoBulletinGrandpa, + BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverRococoBulletin, + XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -44,7 +41,6 @@ use bridge_runtime_common::{ }; use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -52,15 +48,6 @@ use xcm::{ use xcm_builder::BridgeBlobDispatcher; parameter_types! { - /// Maximal number of entries in the unrewarded relayers vector at the Rococo Bridge Hub. It matches the - /// maximal number of unrewarded relayers that the single confirmation transaction at Rococo Bulletin Chain - /// may process. - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_polkadot_bulletin::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - /// Maximal number of unconfirmed messages at the Rococo Bridge Hub. It matches the maximal number of - /// unconfirmed messages that the single confirmation transaction at Rococo Bulletin Chain may process. - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_polkadot_bulletin::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; /// Bridge specific chain (network) identifier of the Rococo Bulletin Chain. pub const RococoBulletinChainId: bp_runtime::ChainId = bp_polkadot_bulletin::PolkadotBulletin::ID; /// Interior location (relative to this runtime) of the with-RococoBulletin messages pallet. @@ -142,31 +129,6 @@ impl XcmBlobHauler for ToRococoBulletinXcmBlobHauler { type OnMessagesDeliveredFromRococoBulletin = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubRococo -> Rococo Bulletin. -pub struct WithRococoBulletinMessageBridge; -impl MessageBridge for WithRococoBulletinMessageBridge { - // Bulletin chain assumes it is bridged with Polkadot Bridge Hub - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_polkadot::WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = RococoBulletin; - type BridgedHeaderChain = BridgeRococoBulletinGrandpa; -} - -/// Maximal outbound payload size of BridgeHubRococo -> RococoBulletin messages. -pub type ToRococoBulletinMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// RococoBulletin chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct RococoBulletin; - -impl UnderlyingChainProvider for RococoBulletin { - type Chain = bp_polkadot_bulletin::PolkadotBulletin; -} - -impl messages::BridgedChainWithMessages for RococoBulletin {} - /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin /// chain. pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< @@ -189,22 +151,20 @@ impl pallet_bridge_messages::Config for Runt type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo; - type BridgedChainId = RococoBulletinChainId; + + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_polkadot_bulletin::PolkadotBulletin; + type BridgedHeaderChain = BridgeRococoBulletinGrandpa; + type ActiveOutboundLanes = ActiveOutboundLanesToRococoBulletin; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToRococoBulletinMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch; type OnMessagesDelivered = OnMessagesDeliveredFromRococoBulletin; @@ -267,8 +227,7 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, - bridge: WithRococoBulletinMessageBridge, - this_chain: bp_rococo::Rococo, + this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 1681ac7f4687493c82c0a3233439b2a9d47a1ad0..07bb718bd13d60bb8eafd980a1fd642b52a97012 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -17,27 +17,21 @@ //! Bridge definitions used on BridgeHubRococo for bridging to BridgeHubWestend. 
use crate::{ - bridge_common_config::{ - BridgeHubRococo, BridgeParachainWestendInstance, DeliveryRewardInBalance, - }, + bridge_common_config::{BridgeParachainWestendInstance, DeliveryRewardInBalance}, weights, xcm_config::UniversalLocation, - AccountId, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubWestend, - XcmRouter, + BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubWestend, XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -46,7 +40,6 @@ use bridge_runtime_common::{ use codec::Encode; use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -54,11 +47,7 @@ use xcm::{ use xcm_builder::BridgeBlobDispatcher; parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWestendChainId: bp_runtime::ChainId = BridgeHubWestend::ID; + pub const BridgeHubWestendChainId: bp_runtime::ChainId = bp_bridge_hub_westend::BridgeHubWestend::ID; pub BridgeRococoToWestendMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::Westend; pub WestendGlobalConsensusNetworkLocation: Location = Location::new( @@ -148,34 +137,6 @@ impl XcmBlobHauler for ToBridgeHubWestendXcmBlobHauler { type OnMessagesDeliveredFromWestend = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWestend -pub struct WithBridgeHubWestendMessageBridge; -impl MessageBridge for WithBridgeHubWestendMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWestend; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWestendInstance, - bp_bridge_hub_westend::BridgeHubWestend, - >; -} - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWestend messages. -pub type ToBridgeHubWestendMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWestend chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWestend; - -impl UnderlyingChainProvider for BridgeHubWestend { - type Chain = bp_bridge_hub_westend::BridgeHubWestend; -} - -impl messages::BridgedChainWithMessages for BridgeHubWestend {} - /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. 
pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< RefundBridgedMessages< @@ -196,26 +157,28 @@ pub type WithBridgeHubWestendMessagesInstance = pallet_bridge_messages::Instance impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages_rococo_to_westend::WeightInfo; - type BridgedChainId = BridgeHubWestendChainId; + + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWestend; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToBridgeHubWestendMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, DeliveryRewardInBalance, >; - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch< FromWestendMessageBlobDispatcher, Self::WeightInfo, @@ -248,9 +211,8 @@ mod tests { assert_complete_bridge_types, extensions::refund_relayer_extension::RefundableParachain, integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, + assert_complete_with_parachain_bridge_constants, check_message_lane_weights, + AssertChainConstants, AssertCompleteBridgeConstants, }, }; use parachains_common::Balance; @@ -292,36 +254,20 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, - bridge: WithBridgeHubWestendMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_westend::Westend, + this_chain: bp_bridge_hub_rococo::BridgeHubRococo, + bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, ); - assert_complete_bridge_constants::< + assert_complete_with_parachain_bridge_constants::< Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, + bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), block_weights: bp_bridge_hub_rococo::BlockWeightsForAsyncBacking::get(), }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: BridgeHubWestend::ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: - bp_westend::WITH_WESTEND_GRANDPA_PALLET_NAME, - 
with_bridged_chain_messages_pallet_name: - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, - }, }); bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< @@ -332,7 +278,7 @@ mod tests { bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< Runtime, - RefundableParachain, + RefundableParachain, PriorityBoostPerParachainHeader, >(FEE_BOOST_PER_PARACHAIN_HEADER); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e7868bcbc78d0af4b56f13a5fe844ccc2a368813..8ca5898d1a104cfe6b1773ebca3b9ae0004167f5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -98,7 +98,7 @@ pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -214,7 +214,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -966,7 +966,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -979,11 +979,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -998,7 +998,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1008,6 +1008,18 @@ impl_runtime_apis! 
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -1420,7 +1432,7 @@ impl_runtime_apis! { prepare_message_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, - bridge_to_westend_config::WithBridgeHubWestendMessageBridge, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Rococo), Parachain(42)].into())) } @@ -1430,7 +1442,7 @@ impl_runtime_apis! { prepare_message_delivery_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, - bridge_to_westend_config::WithBridgeHubWestendMessageBridge, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(params) } @@ -1455,7 +1467,7 @@ impl_runtime_apis! { prepare_message_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, - bridge_to_bulletin_config::WithRococoBulletinMessageBridge, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Rococo), Parachain(42)].into())) } @@ -1465,7 +1477,7 @@ impl_runtime_apis! { prepare_message_delivery_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, - bridge_to_bulletin_config::WithRococoBulletinMessageBridge, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(params) } @@ -1491,7 +1503,7 @@ impl_runtime_apis! { fn prepare_parachain_heads_proof( parachains: &[bp_polkadot_core::parachains::ParaId], parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, + proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( pallet_bridge_parachains::RelayBlockNumber, pallet_bridge_parachains::RelayBlockHash, @@ -1501,7 +1513,7 @@ impl_runtime_apis! { prepare_parachain_heads_proof::( parachains, parachain_head_size, - proof_size, + proof_params, ) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 11e1439a1f6df2423421faf85ce6dd75c37e045b..4ce57b2e50161a812ddf17a9bbd20d5fc58682a7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,15 +64,17 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { + fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `438 + p * (60 ยฑ0)` // Estimated: `51735` - // Minimum execution time: 300_829_000 picoseconds. - Weight::from_parts(321_573_000, 0) + // Minimum execution time: 325_365_000 picoseconds. + Weight::from_parts(14_958_535, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 25_917 - .saturating_add(Weight::from_parts(48_613_160, 0).saturating_mul(p.into())) + // Standard Error: 15_085 + .saturating_add(Weight::from_parts(41_227_904, 0).saturating_mul(p.into())) + // Standard Error: 50_338 + .saturating_add(Weight::from_parts(2_664_555, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -90,8 +92,8 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 36_661_000 picoseconds. - Weight::from_parts(38_106_000, 0) + // Minimum execution time: 37_206_000 picoseconds. + Weight::from_parts(38_545_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,13 +74,17 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 47_599_000 picoseconds. - Weight::from_parts(49_731_000, 0) + // Minimum execution time: 37_075_000 picoseconds. + Weight::from_parts(37_757_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 5_776 + .saturating_add(Weight::from_parts(11_586_768, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -94,10 +98,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 42_211_000 picoseconds. - Weight::from_parts(43_454_000, 0) + // Minimum execution time: 42_087_000 picoseconds. 
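The regenerated message weights here are parameterised by the component `n`, so each entry is a measured base plus a per-unit term (the `T::DbWeight` reads and writes come on top). A rough sketch of how such an entry composes, using the numbers from the `receive_n_messages_proof` entry above and assuming `frame_support` as a dependency; the database weights are omitted:

```rust
use frame_support::weights::Weight;

/// Mirrors the generated formula above (base + per-message term), without the
/// `T::DbWeight` reads/writes that the real `WeightInfo` adds on top.
fn receive_n_messages_proof_weight(n: u64) -> Weight {
    Weight::from_parts(37_757_000, 0)
        .saturating_add(Weight::from_parts(0, 52645))
        .saturating_add(Weight::from_parts(11_586_768, 0).saturating_mul(n))
}

fn main() {
    let w = receive_n_messages_proof_weight(3);
    // 37_757_000 + 3 * 11_586_768 = 72_517_304 picoseconds of ref time.
    assert_eq!(w.ref_time(), 72_517_304);
    // The proof-size component stays at the measured 52_645 bytes.
    assert_eq!(w.proof_size(), 52_645);
}
```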
+ Weight::from_parts(42_970_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -108,30 +112,20 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `589` - // Estimated: `52645` - // Minimum execution time: 36_072_000 picoseconds. - Weight::from_parts(37_260_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 66_995_000 picoseconds. - Weight::from_parts(68_661_000, 0) + // Minimum execution time: 35_055_000 picoseconds. + Weight::from_parts(36_987_740, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_316, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) @@ -142,10 +136,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_553_000 picoseconds. - Weight::from_parts(26_205_000, 0) + // Minimum execution time: 24_326_000 picoseconds. 
+ Weight::from_parts(25_169_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -158,10 +152,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_610_000 picoseconds. - Weight::from_parts(26_273_000, 0) + // Minimum execution time: 24_484_000 picoseconds. + Weight::from_parts(25_130_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -174,10 +168,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_651_000 picoseconds. - Weight::from_parts(26_172_000, 0) + // Minimum execution time: 24_450_000 picoseconds. + Weight::from_parts(25_164_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -191,7 +185,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -201,20 +195,20 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. 
+ fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `780` + // Measured: `813` // Estimated: `52645` - // Minimum execution time: 64_219_000 picoseconds. - Weight::from_parts(65_848_290, 0) + // Minimum execution time: 54_317_000 picoseconds. + Weight::from_parts(59_171_547, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 43 - .saturating_add(Weight::from_parts(7_577, 0).saturating_mul(i.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(7_566, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs index 30ea9eed4a5b4f187ea76633400cff8c39991b46..9c05dae979daa0107e7754cc7107f80c3f753b7e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_messages` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -51,7 +51,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -60,10 +60,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 40_349_000 picoseconds. - Weight::from_parts(41_856_000, 0) + // Minimum execution time: 41_396_000 picoseconds. 
+ Weight::from_parts(43_141_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -71,27 +71,31 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 50_514_000 picoseconds. - Weight::from_parts(52_254_000, 0) + // Minimum execution time: 41_095_000 picoseconds. + Weight::from_parts(42_030_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 5_702 + .saturating_add(Weight::from_parts(11_627_951, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -100,10 +104,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 45_761_000 picoseconds. - Weight::from_parts(47_075_000, 0) + // Minimum execution time: 45_912_000 picoseconds. 
+ Weight::from_parts(47_564_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -111,37 +115,25 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `573` - // Estimated: `52645` - // Minimum execution time: 39_098_000 picoseconds. - Weight::from_parts(40_577_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `573` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 69_120_000 picoseconds. - Weight::from_parts(71_810_000, 0) + // Minimum execution time: 39_175_000 picoseconds. 
+ Weight::from_parts(41_674_095, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) @@ -156,11 +148,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 32_325_000 picoseconds. - Weight::from_parts(33_070_000, 0) - .saturating_add(Weight::from_parts(0, 3912)) + // Measured: `501` + // Estimated: `3966` + // Minimum execution time: 32_033_000 picoseconds. + Weight::from_parts(33_131_000, 0) + .saturating_add(Weight::from_parts(0, 3966)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -176,11 +168,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 32_180_000 picoseconds. - Weight::from_parts(33_202_000, 0) - .saturating_add(Weight::from_parts(0, 3912)) + // Measured: `501` + // Estimated: `3966` + // Minimum execution time: 32_153_000 picoseconds. + Weight::from_parts(33_126_000, 0) + .saturating_add(Weight::from_parts(0, 3966)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -196,10 +188,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `501` // Estimated: `6086` - // Minimum execution time: 36_774_000 picoseconds. - Weight::from_parts(37_774_000, 0) + // Minimum execution time: 36_387_000 picoseconds. 
+ Weight::from_parts(37_396_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -207,7 +199,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -215,7 +207,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -225,18 +217,18 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `736` + // Measured: `789` // Estimated: `52645` - // Minimum execution time: 65_934_000 picoseconds. - Weight::from_parts(67_915_916, 0) + // Minimum execution time: 56_562_000 picoseconds. 
+ Weight::from_parts(61_452_871, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 65 - .saturating_add(Weight::from_parts(7_190, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(7_587, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index ea68852804e3955577bf822d42887bf5bd772657..8eb291ea14523b15d6c44b8ee72d637fcbe11166 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,20 +56,22 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 31_135_000 picoseconds. - Weight::from_parts(32_061_351, 0) + // Minimum execution time: 34_889_000 picoseconds. 
+ Weight::from_parts(36_100_759, 0) .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 80_309 - .saturating_add(Weight::from_parts(99_724, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + // Standard Error: 102_466 + .saturating_add(Weight::from_parts(178_820, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -79,17 +81,19 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 32_263_000 picoseconds. - Weight::from_parts(33_139_000, 0) + // Minimum execution time: 36_501_000 picoseconds. + Weight::from_parts(37_266_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -99,16 +103,18 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 61_313_000 picoseconds. - Weight::from_parts(62_200_000, 0) + // Minimum execution time: 66_059_000 picoseconds. 
+ Weight::from_parts(67_139_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index 5ab4cb900d848f37f1a5777b686d294837688495..f8bb983e80aa776ae7fe8c9a756935096956e7dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_rewards() -> Weight { // Proof Size summary in bytes: - // Measured: `244` + // Measured: `278` // Estimated: `3593` - // Minimum execution time: 45_393_000 picoseconds. - Weight::from_parts(46_210_000, 0) + // Minimum execution time: 44_224_000 picoseconds. + Weight::from_parts(44_905_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -70,10 +70,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `97` + // Measured: `131` // Estimated: `4714` - // Minimum execution time: 23_767_000 picoseconds. - Weight::from_parts(24_217_000, 0) + // Minimum execution time: 23_902_000 picoseconds. + Weight::from_parts(24_702_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,10 +84,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `197` + // Measured: `231` // Estimated: `4714` - // Minimum execution time: 25_745_000 picoseconds. - Weight::from_parts(26_319_000, 0) + // Minimum execution time: 24_469_000 picoseconds. 
+ Weight::from_parts(25_176_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -100,10 +100,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn slash_and_deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `300` + // Measured: `334` // Estimated: `4714` - // Minimum execution time: 27_497_000 picoseconds. - Weight::from_parts(27_939_000, 0) + // Minimum execution time: 27_518_000 picoseconds. + Weight::from_parts(28_068_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -112,10 +112,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn register_relayer_reward() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3538` - // Minimum execution time: 5_584_000 picoseconds. - Weight::from_parts(5_908_000, 0) + // Minimum execution time: 5_484_000 picoseconds. + Weight::from_parts(5_718_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index abd84f8e89b07799758c36b002c30db742305927..bafc973bdac444def7058de4a1a2c3feceb1abfc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 61_813_000 picoseconds. - Weight::from_parts(62_996_000, 6196) + // Minimum execution time: 60_119_000 picoseconds. + Weight::from_parts(61_871_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_112_000, 0) + // Minimum execution time: 998_000 picoseconds. 
+ Weight::from_parts(1_038_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +86,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_472_000 picoseconds. - Weight::from_parts(7_723_000, 3497) + // Minimum execution time: 6_327_000 picoseconds. + Weight::from_parts(6_520_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_414_000 picoseconds. - Weight::from_parts(8_765_000, 0) + // Minimum execution time: 6_783_000 picoseconds. + Weight::from_parts(7_117_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(2_243_000, 0) + // Minimum execution time: 1_589_000 picoseconds. + Weight::from_parts(1_655_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_866_000 picoseconds. - Weight::from_parts(1_931_000, 0) + // Minimum execution time: 1_013_000 picoseconds. + Weight::from_parts(1_045_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_847_000 picoseconds. - Weight::from_parts(1_921_000, 0) + // Minimum execution time: 1_005_000 picoseconds. + Weight::from_parts(1_044_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_797_000 picoseconds. - Weight::from_parts(1_880_000, 0) + // Minimum execution time: 964_000 picoseconds. + Weight::from_parts(1_011_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_458_000 picoseconds. - Weight::from_parts(2_523_000, 0) + // Minimum execution time: 1_005_000 picoseconds. + Weight::from_parts(1_027_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_833_000 picoseconds. - Weight::from_parts(1_906_000, 0) + // Minimum execution time: 980_000 picoseconds. + Weight::from_parts(1_009_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 54_659_000 picoseconds. - Weight::from_parts(56_025_000, 6196) + // Minimum execution time: 56_726_000 picoseconds. + Weight::from_parts(59_300_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_953_000 picoseconds. - Weight::from_parts(11_220_000, 3555) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_519_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_834_000 picoseconds. 
- Weight::from_parts(1_892_000, 0) + // Minimum execution time: 999_000 picoseconds. + Weight::from_parts(1_035_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 22_238_000 picoseconds. - Weight::from_parts(22_690_000, 3503) + // Minimum execution time: 20_313_000 picoseconds. + Weight::from_parts(21_000_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +211,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_798_000 picoseconds. - Weight::from_parts(3_936_000, 0) + // Minimum execution time: 2_820_000 picoseconds. + Weight::from_parts(2_949_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_985_000 picoseconds. - Weight::from_parts(3_099_000, 0) + // Minimum execution time: 1_293_000 picoseconds. + Weight::from_parts(1_354_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_955_000 picoseconds. - Weight::from_parts(2_050_000, 0) + // Minimum execution time: 1_076_000 picoseconds. + Weight::from_parts(1_114_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_939_000 picoseconds. - Weight::from_parts(1_990_000, 0) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_055_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_841_000 picoseconds. - Weight::from_parts(1_900_000, 0) + // Minimum execution time: 979_000 picoseconds. + Weight::from_parts(1_019_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_081_000 picoseconds. - Weight::from_parts(2_145_000, 0) + // Minimum execution time: 1_161_000 picoseconds. + Weight::from_parts(1_208_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +270,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 59_600_000 picoseconds. - Weight::from_parts(61_572_000, 6196) + // Minimum execution time: 62_250_000 picoseconds. + Weight::from_parts(64_477_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +279,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_390_000 picoseconds. - Weight::from_parts(4_517_000, 0) + // Minimum execution time: 4_286_000 picoseconds. 
+ Weight::from_parts(4_476_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +302,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_527_000, 6196) + // Minimum execution time: 58_253_000 picoseconds. + Weight::from_parts(59_360_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +311,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_879_000 picoseconds. - Weight::from_parts(1_947_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_065_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_827_000 picoseconds. - Weight::from_parts(1_900_000, 0) + // Minimum execution time: 993_000 picoseconds. + Weight::from_parts(1_015_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_898_000, 0) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(999_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -339,16 +339,16 @@ impl WeightInfo { // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `6130` - // Minimum execution time: 41_598_000 picoseconds. - Weight::from_parts(42_219_173, 6130) - // Standard Error: 426 - .saturating_add(Weight::from_parts(452_460, 0).saturating_mul(x.into())) + // Minimum execution time: 37_014_000 picoseconds. + Weight::from_parts(38_096_655, 6130) + // Standard Error: 61 + .saturating_add(Weight::from_parts(45_146, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +356,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_812_000 picoseconds. - Weight::from_parts(1_898_000, 0) + // Minimum execution time: 996_000 picoseconds. + Weight::from_parts(1_025_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_976_000, 0) + // Minimum execution time: 1_001_000 picoseconds. 
+ Weight::from_parts(1_044_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index b309232825db3aa964b2fa1a1d8d739f06ec3153..1d3d9e55f7eeb99bb46c23aba0bf9940515c69d4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -148,8 +148,7 @@ mod bridge_hub_westend_tests { use bridge_hub_test_utils::test_cases::from_parachain; use bridge_to_westend_config::{ BridgeHubWestendChainId, BridgeHubWestendLocation, WestendGlobalConsensusNetwork, - WithBridgeHubWestendMessageBridge, WithBridgeHubWestendMessagesInstance, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, + WithBridgeHubWestendMessagesInstance, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, }; // Para id of sibling chain used in tests. @@ -162,7 +161,6 @@ mod bridge_hub_westend_tests { BridgeGrandpaWestendInstance, BridgeParachainWestendInstance, WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, >; #[test] @@ -457,8 +455,8 @@ mod bridge_hub_bulletin_tests { use bridge_hub_test_utils::test_cases::from_grandpa_chain; use bridge_to_bulletin_config::{ RococoBulletinChainId, RococoBulletinGlobalConsensusNetwork, - RococoBulletinGlobalConsensusNetworkLocation, WithRococoBulletinMessageBridge, - WithRococoBulletinMessagesInstance, XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, + RococoBulletinGlobalConsensusNetworkLocation, WithRococoBulletinMessagesInstance, + XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, }; // Para id of sibling chain used in tests. @@ -470,7 +468,6 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, - WithRococoBulletinMessageBridge, >; #[test] @@ -594,44 +591,4 @@ mod bridge_hub_bulletin_tests { construct_and_apply_extrinsic, ) } - - #[test] - pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { - bridge_hub_test_utils::check_sane_fees_values( - "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", - bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), - || { - from_grandpa_chain::can_calculate_fee_for_standalone_message_delivery_transaction::< - RuntimeTestsAdapter, - >(collator_session_keys(), construct_and_estimate_extrinsic_fee) - }, - Perbill::from_percent(33), - None, /* we don't want lowering according to the Bulletin setup, because - * `from_grandpa_chain` is cheaper then `from_parachain_chain` */ - &format!( - "Estimate fee for `single message delivery` for runtime: {:?}", - ::Version::get() - ), - ) - } - - #[test] - pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { - bridge_hub_test_utils::check_sane_fees_values( - "bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", - bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), - || { - from_grandpa_chain::can_calculate_fee_for_standalone_message_confirmation_transaction::< - RuntimeTestsAdapter, - >(collator_session_keys(), construct_and_estimate_extrinsic_fee) - }, - Perbill::from_percent(33), - None, /* we don't want lowering according to the Bulletin setup, because - * `from_grandpa_chain` is cheaper then `from_parachain_chain` */ - &format!( - "Estimate fee for `single message confirmation` for runtime: {:?}", - ::Version::get() - ), - ) - } } diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 0f16d629fc26051e53d3cabef19290fb4e8cff5e..e2671d3d606d1e993a38de2eeacf8a9675bb3b82 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -10,102 +10,101 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path 
= "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +westend-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = 
true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } -pallet-bridge-grandpa = { path = 
"../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } -bridge-hub-common = { path = "../common", default-features = false } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-rococo = { workspace = true } +bp-westend = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } +pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } +bridge-hub-common = { workspace = true } [dev-dependencies] -static_assertions = "1.1" -bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = ["integrity-test"] } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +static_assertions = { workspace = true, default-features = true } +bridge-hub-test-utils = { workspace = true, default-features = true } +bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] @@ -182,11 +181,10 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -221,7 +219,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 425b53da30fc8a176fcddfe145fab66a41b60f8a..09d55f4323ab5aa92be471758264e688e7d45e05 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -18,10 +18,12 @@ use crate::{ bridge_common_config::DeliveryRewardInBalance, weights, xcm_config::UniversalLocation, - AccountId, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeOrigin, - XcmOverBridgeHubRococo, XcmRouter, + BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubRococo, XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + 
target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::Chain; use bridge_runtime_common::{ @@ -29,12 +31,6 @@ use bridge_runtime_common::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -45,7 +41,6 @@ use frame_support::{ parameter_types, traits::{ConstU32, PalletInfoAccess}, }; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -59,11 +54,7 @@ parameter_types! { pub const RococoBridgeParachainPalletName: &'static str = "Paras"; pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = BridgeHubRococo::ID; + pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_bridge_hub_rococo::BridgeHubRococo::ID; pub BridgeWestendToRococoMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; pub RococoGlobalConsensusNetworkLocation: Location = Location::new( @@ -153,46 +144,6 @@ impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { /// On messages delivered callback. type OnMessagesDelivered = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubWestend -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWestend; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Maximal outbound payload size of BridgeHubWestend -> BridgeHubRococo messages. -type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWestend chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWestend; - -impl UnderlyingChainProvider for BridgeHubWestend { - type Chain = bp_bridge_hub_westend::BridgeHubWestend; -} - -impl ThisChainWithMessages for BridgeHubWestend { - type RuntimeOrigin = RuntimeOrigin; -} - /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< RefundBridgedMessages< @@ -237,26 +188,28 @@ pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance1 impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages::WeightInfo; - type BridgedChainId = BridgeHubRococoChainId; + + type ThisChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainRococoInstance, + bp_bridge_hub_rococo::BridgeHubRococo, + >; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, DeliveryRewardInBalance, >; - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch< FromRococoMessageBlobDispatcher, Self::WeightInfo, @@ -287,9 +240,8 @@ mod tests { assert_complete_bridge_types, extensions::refund_relayer_extension::RefundableParachain, integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, + assert_complete_with_parachain_bridge_constants, check_message_lane_weights, + AssertChainConstants, AssertCompleteBridgeConstants, }, }; use parachains_common::Balance; @@ -331,35 +283,20 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_westend::Westend, - bridged_chain: bp_rococo::Rococo, + this_chain: bp_bridge_hub_westend::BridgeHubWestend, + bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, ); - assert_complete_bridge_constants::< + assert_complete_with_parachain_bridge_constants::< Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, + bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), block_weights: bp_bridge_hub_westend::BlockWeightsForAsyncBacking::get(), }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: BridgeHubRococo::ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - 
bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, }); bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< @@ -370,7 +307,7 @@ mod tests { bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< Runtime, - RefundableParachain, + RefundableParachain, PriorityBoostPerParachainHeader, >(FEE_BOOST_PER_PARACHAIN_HEADER); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e26d490f9ac1115b465d707980afc31cb5dfd402..993f60432458739834d9a7c4384243fa597d60fe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -75,7 +75,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -189,7 +189,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -716,7 +716,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -729,11 +729,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -748,7 +748,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -758,6 +758,18 @@ impl_runtime_apis! 
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -1106,7 +1118,7 @@ impl_runtime_apis! { prepare_message_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Westend), Parachain(42)].into())) } @@ -1116,7 +1128,7 @@ impl_runtime_apis! { prepare_message_delivery_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, >(params) } @@ -1142,7 +1154,7 @@ impl_runtime_apis! { fn prepare_parachain_heads_proof( parachains: &[bp_polkadot_core::parachains::ParaId], parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, + proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( pallet_bridge_parachains::RelayBlockNumber, pallet_bridge_parachains::RelayBlockHash, @@ -1152,7 +1164,7 @@ impl_runtime_apis! { prepare_parachain_heads_proof::( parachains, parachain_head_size, - proof_size, + proof_params, ) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index e98be6ba39be74c3532290ea4a7b483640466c10..fa7efc260489bead50ba63969571e018248ddefe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -68,13 +68,13 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -60,10 +60,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 40_646_000 picoseconds. - Weight::from_parts(41_754_000, 0) + // Minimum execution time: 40_748_000 picoseconds. + Weight::from_parts(41_836_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -71,27 +71,30 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 50_898_000 picoseconds. - Weight::from_parts(52_743_000, 0) + // Minimum execution time: 40_923_000 picoseconds. 
+ Weight::from_parts(41_287_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 9_774 + .saturating_add(Weight::from_parts(11_469_207, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -100,10 +103,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 45_848_000 picoseconds. - Weight::from_parts(47_036_000, 0) + // Minimum execution time: 45_946_000 picoseconds. + Weight::from_parts(47_547_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -111,37 +114,24 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `433` - // Estimated: `52645` - // Minimum execution time: 39_085_000 picoseconds. 
- Weight::from_parts(41_623_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `433` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 72_754_000 picoseconds. - Weight::from_parts(74_985_000, 0) + // Minimum execution time: 39_668_000 picoseconds. + Weight::from_parts(41_908_980, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(2_209, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) @@ -156,11 +146,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 31_479_000 picoseconds. - Weight::from_parts(32_280_000, 0) - .saturating_add(Weight::from_parts(0, 3802)) + // Measured: `357` + // Estimated: `3822` + // Minimum execution time: 30_544_000 picoseconds. + Weight::from_parts(31_171_000, 0) + .saturating_add(Weight::from_parts(0, 3822)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -176,11 +166,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 31_807_000 picoseconds. - Weight::from_parts(32_219_000, 0) - .saturating_add(Weight::from_parts(0, 3802)) + // Measured: `357` + // Estimated: `3822` + // Minimum execution time: 30_593_000 picoseconds. 
+ Weight::from_parts(31_261_000, 0) + .saturating_add(Weight::from_parts(0, 3822)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -196,10 +186,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `337` + // Measured: `357` // Estimated: `6086` - // Minimum execution time: 36_450_000 picoseconds. - Weight::from_parts(37_288_000, 0) + // Minimum execution time: 34_682_000 picoseconds. + Weight::from_parts(35_277_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -207,7 +197,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -215,7 +205,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -225,17 +215,17 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633` + // Measured: `653` // Estimated: `52645` - // Minimum execution time: 67_047_000 picoseconds. - Weight::from_parts(68_717_105, 0) + // Minimum execution time: 56_465_000 picoseconds. 
+ Weight::from_parts(61_575_775, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 138 - .saturating_add(Weight::from_parts(8_056, 0).saturating_mul(i.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(7_197, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs index 9819bd4065411bec6799de3f2aa41c318f53a122..b4748f1417059e91e482c650f5046ef09b76a7af 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,18 +56,20 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 29_994_000 picoseconds. - Weight::from_parts(31_005_636, 0) + // Minimum execution time: 34_177_000 picoseconds. 
+ Weight::from_parts(35_662_308, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -77,17 +79,19 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 31_425_000 picoseconds. - Weight::from_parts(32_163_000, 0) + // Minimum execution time: 35_975_000 picoseconds. + Weight::from_parts(36_510_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -97,16 +101,18 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 60_062_000 picoseconds. - Weight::from_parts(61_201_000, 0) + // Minimum execution time: 62_837_000 picoseconds. 
+ Weight::from_parts(63_562_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs index ed96f0cd87c9e73ee8c842ab9f4f5d60bf81c2ac..60d81dc3082a86ea3cea3951d4ed59cf9be8ed56 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 45_732_000 picoseconds. - Weight::from_parts(46_282_000, 0) + // Minimum execution time: 43_132_000 picoseconds. + Weight::from_parts(43_923_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -72,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 22_934_000 picoseconds. - Weight::from_parts(23_531_000, 0) + // Minimum execution time: 22_765_000 picoseconds. + Weight::from_parts(23_576_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 25_187_000 picoseconds. - Weight::from_parts(25_679_000, 0) + // Minimum execution time: 24_013_000 picoseconds. + Weight::from_parts(24_460_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -102,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 27_015_000 picoseconds. - Weight::from_parts(27_608_000, 0) + // Minimum execution time: 26_946_000 picoseconds. 
+ Weight::from_parts(27_485_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -114,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 5_207_000 picoseconds. - Weight::from_parts(5_394_000, 0) + // Minimum execution time: 4_658_000 picoseconds. + Weight::from_parts(4_902_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9281a880c7e1266d65d29436ca88e51e896c0363..73bea66bf7107f0c73696713c293b9fd9a714380 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 61_577_000 picoseconds. - Weight::from_parts(63_216_000, 6196) + // Minimum execution time: 58_505_000 picoseconds. + Weight::from_parts(60_437_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_019_000 picoseconds. - Weight::from_parts(2_146_000, 0) + // Minimum execution time: 510_000 picoseconds. + Weight::from_parts(569_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +86,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_473_000 picoseconds. - Weight::from_parts(7_784_000, 3497) + // Minimum execution time: 5_597_000 picoseconds. + Weight::from_parts(5_884_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_385_000 picoseconds. - Weight::from_parts(8_768_000, 0) + // Minimum execution time: 5_320_000 picoseconds. + Weight::from_parts(5_594_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_181_000 picoseconds. 
- Weight::from_parts(2_304_000, 0) + // Minimum execution time: 1_164_000 picoseconds. + Weight::from_parts(1_227_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_858_000 picoseconds. - Weight::from_parts(1_919_000, 0) + // Minimum execution time: 528_000 picoseconds. + Weight::from_parts(586_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_855_000 picoseconds. - Weight::from_parts(1_979_000, 0) + // Minimum execution time: 509_000 picoseconds. + Weight::from_parts(571_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_823_000 picoseconds. - Weight::from_parts(1_890_000, 0) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(546_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_407_000 picoseconds. - Weight::from_parts(2_507_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(600_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_838_000 picoseconds. - Weight::from_parts(1_894_000, 0) + // Minimum execution time: 514_000 picoseconds. + Weight::from_parts(558_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 54_847_000 picoseconds. - Weight::from_parts(55_742_000, 6196) + // Minimum execution time: 55_871_000 picoseconds. + Weight::from_parts(57_172_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_614_000 picoseconds. - Weight::from_parts(11_344_000, 3555) + // Minimum execution time: 8_487_000 picoseconds. + Weight::from_parts(8_800_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_826_000 picoseconds. - Weight::from_parts(1_899_000, 0) + // Minimum execution time: 528_000 picoseconds. + Weight::from_parts(569_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 22_312_000 picoseconds. - Weight::from_parts(22_607_000, 3503) + // Minimum execution time: 19_803_000 picoseconds. + Weight::from_parts(20_368_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +211,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_728_000 picoseconds. - Weight::from_parts(3_914_000, 0) + // Minimum execution time: 2_185_000 picoseconds. 
+ Weight::from_parts(2_332_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_054_000 picoseconds. - Weight::from_parts(3_140_000, 0) + // Minimum execution time: 822_000 picoseconds. + Weight::from_parts(928_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_996_000 picoseconds. - Weight::from_parts(2_148_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(643_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_008_000 picoseconds. - Weight::from_parts(2_077_000, 0) + // Minimum execution time: 503_000 picoseconds. + Weight::from_parts(580_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(1_913_000, 0) + // Minimum execution time: 534_000 picoseconds. + Weight::from_parts(577_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_052_000 picoseconds. - Weight::from_parts(2_120_000, 0) + // Minimum execution time: 694_000 picoseconds. + Weight::from_parts(745_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +270,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 58_725_000 picoseconds. - Weight::from_parts(60_271_000, 6196) + // Minimum execution time: 61_083_000 picoseconds. + Weight::from_parts(62_214_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +279,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_570_000 picoseconds. - Weight::from_parts(4_707_000, 0) + // Minimum execution time: 3_261_000 picoseconds. + Weight::from_parts(3_483_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +302,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 54_903_000 picoseconds. - Weight::from_parts(55_711_000, 6196) + // Minimum execution time: 56_270_000 picoseconds. + Weight::from_parts(57_443_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +311,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_872_000 picoseconds. - Weight::from_parts(1_938_000, 0) + // Minimum execution time: 565_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_836_000 picoseconds. - Weight::from_parts(1_903_000, 0) + // Minimum execution time: 496_000 picoseconds. + Weight::from_parts(563_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_847_000 picoseconds. 
- Weight::from_parts(1_900_000, 0) + // Minimum execution time: 518_000 picoseconds. + Weight::from_parts(557_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -339,16 +339,16 @@ impl WeightInfo { // Storage: `BridgeRococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) // Proof: `BridgeRococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) // Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `6165` - // Minimum execution time: 41_750_000 picoseconds. - Weight::from_parts(43_496_915, 6165) - // Standard Error: 623 - .saturating_add(Weight::from_parts(457_907, 0).saturating_mul(x.into())) + // Minimum execution time: 36_288_000 picoseconds. + Weight::from_parts(37_707_751, 6165) + // Standard Error: 124 + .saturating_add(Weight::from_parts(51_290, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +356,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_826_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 485_000 picoseconds. + Weight::from_parts(540_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_967_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(586_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 836594140b2328081ff6c0de8cac40ea82dfb6f7..763271fd7af0e69ae8de2aa4fe507083f5d09d9e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -28,8 +28,8 @@ use bridge_hub_westend_runtime::{ }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeHubRococoLocation, - BridgeParachainRococoInstance, WithBridgeHubRococoMessageBridge, - WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + BridgeParachainRococoInstance, WithBridgeHubRococoMessagesInstance, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, }; use codec::{Decode, Encode}; use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; @@ -53,7 +53,6 @@ type RuntimeTestsAdapter = from_parachain::WithRemoteParachainHelperAdapter< BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, >; parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index aece34613e6a6c4dfe100e84b0317a96ad7ee97c..fd0eed1c05a1a65e9f5c41d1880f57b708e85119 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -7,16 +7,16 @@ description = "Bridge hub common utilities" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -snowbridge-core = { path = "../../../../../bridges/snowbridge/primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +cumulus-primitives-core = { workspace = true } +xcm = { workspace = true } +pallet-message-queue = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 80f0114cc4cadb6cd7871454eead80a0988d5e7e..fb96d29a497a37084ae83a461f5a2ff2bb6a7af0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -10,47 +10,47 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -impl-trait-for-tuples = "0.2" +codec = { features = ["derive", "max-encoded-len"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-timestamp = { path 
= "../../../../../substrate/frame/timestamp", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true } +pallet-utility = { workspace = true } +pallet-timestamp = { workspace = true } # Cumulus -asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +parachains-common = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Bridges -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-test-utils = { path = "../../../../../bridges/primitives/test-utils", default-features = false } -pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +bp-test-utils = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-messages = { features = ["test-helpers"], workspace = true } +pallet-bridge-relayers = { workspace = true } +bridge-runtime-common = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 
bfa2f0f50f94ca3ba2f663f9646be3165dd48220..8f3c7de61f8402d7b28231325a9ddc3ab3bac052 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -23,21 +23,12 @@ use crate::{ }; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, - UnrewardedRelayersState, -}; +use bp_messages::{LaneId, UnrewardedRelayersState}; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{HashOf, UnderlyingChainOf}; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; +use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; @@ -53,13 +44,10 @@ pub trait WithRemoteGrandpaChainHelper { /// This chain runtime. type Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config - + BridgeGrandpaConfig< - Self::GPI, - BridgedChain = UnderlyingChainOf>, - > + BridgeMessagesConfig< + + BridgeGrandpaConfig> + + BridgeMessagesConfig< Self::MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config; /// All pallets of this chain, excluding system pallet. @@ -69,38 +57,33 @@ pub trait WithRemoteGrandpaChainHelper { type GPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote GRANDPA chain. type MPI: 'static; - /// Messages bridge definition. - type MB: MessageBridge; } /// Adapter struct that implements [`WithRemoteGrandpaChainHelper`]. 
-pub struct WithRemoteGrandpaChainHelperAdapter( - sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI, MB)>, +pub struct WithRemoteGrandpaChainHelperAdapter( + sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI)>, ); -impl WithRemoteGrandpaChainHelper - for WithRemoteGrandpaChainHelperAdapter +impl WithRemoteGrandpaChainHelper + for WithRemoteGrandpaChainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config - + BridgeGrandpaConfig>> + + BridgeGrandpaConfig> + BridgeMessagesConfig< MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, GPI: 'static, MPI: 'static, - MB: MessageBridge, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type MPI = MPI; - type MB = MB; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -124,13 +107,7 @@ pub fn relayed_incoming_message_works( AccountIdOf: From, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -161,7 +138,8 @@ pub fn relayed_incoming_message_works( // to be submitted by relayer to this chain. let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -186,7 +164,7 @@ pub fn relayed_incoming_message_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -233,13 +211,7 @@ pub fn free_relay_extrinsic_works( AccountIdOf: From, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { // ensure that the runtime allows free header submissions let free_headers_interval = ( // to be submitted by relayer to this chain. let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -322,7 +295,7 @@ pub fn free_relay_extrinsic_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -370,13 +343,7 @@ pub fn complex_relay_extrinsic_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -407,7 +374,8 @@ pub fn complex_relay_extrinsic_works( // to be submitted by relayer to this chain. 
let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -428,7 +396,7 @@ pub fn complex_relay_extrinsic_works( }.into(), BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -470,13 +438,7 @@ where pallet_utility::Config>, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -487,7 +449,8 @@ where // the message additionally let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -526,19 +489,11 @@ where AccountIdOf: From, RuntimeHelper::Runtime: pallet_utility::Config>, - MessageThisChain: + ThisChainOf: bp_runtime::Chain>, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -550,7 +505,8 @@ where }; let (relay_chain_header, grandpa_justification, message_delivery_proof) = test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -587,13 +543,7 @@ where RuntimeHelper: WithRemoteGrandpaChainHelper, RuntimeCallOf: From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -604,7 +554,8 @@ where // the message additionally let (_, _, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -639,19 +590,11 @@ pub fn can_calculate_fee_for_standalone_message_confirmation_transaction: From, - MessageThisChain: + ThisChainOf: bp_runtime::Chain>, RuntimeCallOf: From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -663,7 +606,8 @@ where }; let (_, _, message_delivery_proof) = test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs 
b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 12ab382d9e0f6518afb93f118199170acb5f8cc6..6580648e66063fdd2aff3f8c802cda6efe8e07f3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -23,22 +23,14 @@ use crate::{ }; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, - UnrewardedRelayersState, -}; +use bp_messages::{LaneId, UnrewardedRelayersState}; use bp_polkadot_core::parachains::ParaHash; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{HashOf, Parachain, UnderlyingChainOf}; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bp_runtime::{Chain, Parachain}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; +use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; @@ -59,7 +51,6 @@ pub trait WithRemoteParachainHelper { + BridgeMessagesConfig< Self::MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config; /// All pallets of this chain, excluding system pallet. @@ -71,17 +62,15 @@ pub trait WithRemoteParachainHelper { type PPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote parachain. type MPI: 'static; - /// Messages bridge definition. - type MB: MessageBridge; } /// Adapter struct that implements `WithRemoteParachainHelper`. 
-pub struct WithRemoteParachainHelperAdapter( - sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI, MB)>, +pub struct WithRemoteParachainHelperAdapter( + sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI)>, ); -impl WithRemoteParachainHelper - for WithRemoteParachainHelperAdapter +impl WithRemoteParachainHelper + for WithRemoteParachainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config @@ -90,7 +79,6 @@ where + BridgeMessagesConfig< MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config, AllPalletsWithoutSystem: @@ -98,14 +86,13 @@ where GPI: 'static, PPI: 'static, MPI: 'static, - MB: MessageBridge, + // MB: MessageBridge, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type PPI = PPI; type MPI = MPI; - type MB = MB; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -131,16 +118,9 @@ pub fn relayed_incoming_message_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -179,7 +159,8 @@ pub fn relayed_incoming_message_works( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -219,7 +200,7 @@ pub fn relayed_incoming_message_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -268,16 +249,9 @@ pub fn free_relay_extrinsic_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { // ensure that the runtime allows free header submissions let free_headers_interval = ( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -390,7 +365,7 @@ pub fn free_relay_extrinsic_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -440,16 +415,9 @@ pub fn complex_relay_extrinsic_works( + From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -488,7 +456,8 @@ pub fn complex_relay_extrinsic_works( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -518,7 +487,7 @@ pub 
fn complex_relay_extrinsic_works( }.into(), BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -565,16 +534,9 @@ where RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -592,7 +554,8 @@ where message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -612,7 +575,6 @@ where RuntimeHelper::GPI, RuntimeHelper::PPI, RuntimeHelper::MPI, - _, >( relay_chain_header, grandpa_justification, @@ -637,23 +599,14 @@ where AccountIdOf: From, RuntimeHelper::Runtime: pallet_utility::Config>, - MessageThisChain: - bp_runtime::Chain>, + ThisChainOf: + Chain>, RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -672,7 +625,8 @@ where message_delivery_proof, ) = test_data::from_parachain::make_complex_relayer_confirmation_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -714,16 +668,9 @@ where RuntimeHelper: WithRemoteParachainHelper, RuntimeCallOf: From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -741,7 +688,8 @@ where message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -757,7 +705,6 @@ where let call = test_data::from_parachain::make_standalone_relayer_delivery_call::< RuntimeHelper::Runtime, RuntimeHelper::MPI, - _, >( message_proof, helpers::relayer_id_at_bridged_chain::(), @@ -778,22 +725,13 @@ pub fn can_calculate_fee_for_standalone_message_confirmation_transaction: From, - MessageThisChain: - bp_runtime::Chain>, + ThisChainOf: + Chain>, RuntimeCallOf: From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -806,7 +744,8 @@ where let (_, _, _, _, _, message_delivery_proof) = 
test_data::from_parachain::make_complex_relayer_confirmation_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 0ce049cd1c4630c55c244afbc8a72213cb83d6b9..c990c6e5307cb3f1c390fcb28e1261e2713f899f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -29,6 +29,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; +use pallet_bridge_messages::BridgedChainOf; use parachains_common::AccountId; use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, SlotDurations, @@ -240,10 +241,12 @@ pub(crate) fn initialize_bridge_grandpa_pallet( pub type CallsAndVerifiers = Vec<(RuntimeCallOf, Box)>; +pub type InboundRelayerId = bp_runtime::AccountIdOf>; + /// Returns relayer id at the bridged chain. pub fn relayer_id_at_bridged_chain, MPI>( -) -> Runtime::InboundRelayer { - Runtime::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() +) -> InboundRelayerId { + Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -260,7 +263,7 @@ pub fn relayed_incoming_message_works( ) -> sp_runtime::DispatchOutcome, prepare_message_proof_import: impl FnOnce( Runtime::AccountId, - Runtime::InboundRelayer, + InboundRelayerId, InteriorLocation, MessageNonce, Xcm<()>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index e5d5e7cac96ba14f6abfdae792908352f40d3e31..c61a31e5454ba75429929a0c7b879f6baaa72e15 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -19,30 +19,29 @@ use crate::test_data::prepare_inbound_xcm; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, MessageNonce, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, MessageNonce, UnrewardedRelayersState, }; -use bp_runtime::{AccountIdOf, BlockNumberOf, HeaderOf, StorageProofSize, UnderlyingChainOf}; +use bp_runtime::{AccountIdOf, BlockNumberOf, Chain, HeaderOf, UnverifiedStorageProofParams}; use bp_test_utils::make_default_justification; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::{BridgedChain, BridgedHeader}; use sp_runtime::traits::Header as HeaderT; use xcm::latest::prelude::*; +use crate::test_cases::helpers::InboundRelayerId; use bp_header_chain::{justification::GrandpaJustification, 
ChainWithGrandpa}; use bp_messages::{DeliveredMessages, InboundLaneData, UnrewardedRelayer}; use bp_runtime::HashOf; +use pallet_bridge_messages::{ + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + BridgedChainOf, +}; use sp_runtime::DigestItem; /// Prepare a batch call with bridged GRANDPA finality and message proof. @@ -50,22 +49,17 @@ pub fn make_complex_relayer_delivery_batch( bridged_header: BridgedHeader, bridged_justification: GrandpaJustification>, message_proof: FromBridgedChainMessagesProof>>, - relayer_id_at_bridged_chain: AccountIdOf>, + relayer_id_at_bridged_chain: InboundRelayerId, ) -> pallet_utility::Call where Runtime: pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = AccountIdOf>, - > + pallet_utility::Config, + + pallet_bridge_messages::Config + + pallet_utility::Config, GPI: 'static, MPI: 'static, - >::SourceHeaderChain: SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>>, - >, ::RuntimeCall: From> + From>, + BridgedChainOf: Chain>>, { let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { finality_target: Box::new(bridged_header), @@ -73,7 +67,7 @@ where }; let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }; @@ -97,15 +91,9 @@ where + pallet_utility::Config, GPI: 'static, MPI: 'static, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>, - >, - >, ::RuntimeCall: From> + From>, + BridgedChainOf: Chain>>, { let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { finality_target: Box::new(bridged_header), @@ -124,24 +112,18 @@ where /// Prepare a call with message proof. pub fn make_standalone_relayer_delivery_call( message_proof: FromBridgedChainMessagesProof>>, - relayer_id_at_bridged_chain: AccountIdOf>, + relayer_id_at_bridged_chain: InboundRelayerId, ) -> Runtime::RuntimeCall where Runtime: pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = AccountIdOf>, - >, + + pallet_bridge_messages::Config, MPI: 'static, - >::SourceHeaderChain: SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>>, - >, Runtime::RuntimeCall: From>, + BridgedChainOf: Chain>>, { pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), } @@ -159,14 +141,8 @@ where Runtime: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, MPI: 'static, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>, - >, - >, Runtime::RuntimeCall: From>, + BridgedChainOf: Chain>>, { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: message_delivery_proof, @@ -176,39 +152,47 @@ where } /// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. 
-pub fn make_complex_relayer_delivery_proofs( +pub fn make_complex_relayer_delivery_proofs< + BridgedChain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, xcm_message: Xcm, message_nonce: MessageNonce, message_destination: Junctions, - header_number: BlockNumberOf>, + header_number: BlockNumberOf, is_minimal_call: bool, ) -> ( - HeaderOf>, - GrandpaJustification>>, - FromBridgedChainMessagesProof>>, + HeaderOf, + GrandpaJustification>, + FromBridgedChainMessagesProof>, ) where - MB: MessageBridge, - MessageBridgedChain: Send + Sync + 'static, - UnderlyingChainOf>: ChainWithGrandpa, + BridgedChain: ChainWithGrandpa, + ThisChainWithMessages: ChainWithMessages, { + // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); - let message_size = StorageProofSize::Minimal(message_payload.len() as u32); - // prepare para storage proof containing message - let (state_root, storage_proof) = prepare_messages_storage_proof::( - lane_id, - message_nonce..=message_nonce, - None, - message_size, - message_payload, - encode_all_messages, - encode_lane_data, - ); + // prepare storage proof containing message + let (state_root, storage_proof) = + prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + UnverifiedStorageProofParams::from_db_size(message_payload.len() as u32), + |_| message_payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); - let (header, justification) = make_complex_bridged_grandpa_header_proof::< - MessageBridgedChain, - >(state_root, header_number, is_minimal_call); + let (header, justification) = make_complex_bridged_grandpa_header_proof::( + state_root, + header_number, + is_minimal_call, + ); let message_proof = FromBridgedChainMessagesProof { bridged_header_hash: header.hash(), @@ -222,44 +206,44 @@ where } /// Prepare storage proofs of message confirmations, stored at the (bridged) target GRANDPA chain. 
-pub fn make_complex_relayer_confirmation_proofs( +pub fn make_complex_relayer_confirmation_proofs< + BridgedChain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, - header_number: BlockNumberOf>, - relayer_id_at_this_chain: AccountIdOf>, + header_number: BlockNumberOf, + relayer_id_at_this_chain: AccountIdOf, relayers_state: UnrewardedRelayersState, ) -> ( - HeaderOf>, - GrandpaJustification>>, - FromBridgedChainMessagesDeliveryProof>>, + HeaderOf, + GrandpaJustification>, + FromBridgedChainMessagesDeliveryProof>, ) where - MB: MessageBridge, - MessageBridgedChain: Send + Sync + 'static, - MessageThisChain: Send + Sync + 'static, - UnderlyingChainOf>: ChainWithGrandpa, + BridgedChain: ChainWithGrandpa, + ThisChainWithMessages: ChainWithMessages, { // prepare storage proof containing message delivery proof - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - lane_id, - InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer_id_at_this_chain, - messages: DeliveredMessages::new(1) - }; - relayers_state.unrewarded_relayer_entries as usize - ] - .into(), - last_confirmed_nonce: 1, - }, - StorageProofSize::Minimal(0), - ); + let (state_root, storage_proof) = + prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain, + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + UnverifiedStorageProofParams::default(), + ); - let (header, justification) = make_complex_bridged_grandpa_header_proof::( - state_root, - header_number, - false, - ); + let (header, justification) = + make_complex_bridged_grandpa_header_proof::(state_root, header_number, false); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: header.hash(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index 5d3cba4e53b5ec7ec9cd2e6141e6e95aa8928970..897fe0d0b0f17729e8bbcd8c941d08ec868ac087 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -19,61 +19,58 @@ use super::{from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepare_inbound_xcm}; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, UnrewardedRelayersState, Weight, }; use bp_runtime::{ - AccountIdOf, BlockNumberOf, HeaderOf, Parachain, StorageProofSize, UnderlyingChainOf, + AccountIdOf, BlockNumberOf, Chain, HeaderOf, Parachain, UnverifiedStorageProofParams, }; use bp_test_utils::prepare_parachain_heads_proof; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::BridgedHeader; use 
pallet_bridge_parachains::{RelayBlockHash, RelayBlockNumber}; use sp_runtime::traits::Header as HeaderT; use xcm::latest::prelude::*; +use crate::test_cases::helpers::InboundRelayerId; use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; use bp_messages::{DeliveredMessages, InboundLaneData, MessageNonce, UnrewardedRelayer}; use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; +use pallet_bridge_messages::{ + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + BridgedChainOf, +}; use sp_runtime::SaturatedConversion; /// Prepare a batch call with relay finality proof, parachain head proof and message proof. -pub fn make_complex_relayer_delivery_batch( +pub fn make_complex_relayer_delivery_batch( relay_chain_header: BridgedHeader, grandpa_justification: GrandpaJustification>, parachain_heads: Vec<(ParaId, ParaHash)>, para_heads_proof: ParaHeadsProof, message_proof: FromBridgedChainMessagesProof, - relayer_id_at_bridged_chain: InboundRelayer, -) -> pallet_utility::Call where - Runtime:pallet_bridge_grandpa::Config + relayer_id_at_bridged_chain: InboundRelayerId, +) -> pallet_utility::Call +where + Runtime: pallet_bridge_grandpa::Config + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = InboundRelayer, - > + + pallet_bridge_messages::Config + pallet_utility::Config, GPI: 'static, PPI: 'static, MPI: 'static, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, - ::RuntimeCall: - From> + ParaHash: From< + <>::BridgedChain as bp_runtime::Chain>::Hash, + >, + <>::BridgedChain as bp_runtime::Chain>::Hash: + From, + BridgedChainOf: Chain + Parachain, + ::RuntimeCall: From> + From> + From>, { @@ -93,7 +90,7 @@ pub fn make_complex_relayer_delivery_batch::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), - proof: message_proof.into(), + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }; @@ -122,11 +119,7 @@ where MPI: 'static, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, + BridgedChainOf: Chain + Parachain, ::RuntimeCall: From> + From> + From>, @@ -160,23 +153,19 @@ where } /// Prepare a call with message proof. 
-pub fn make_standalone_relayer_delivery_call( +pub fn make_standalone_relayer_delivery_call( message_proof: FromBridgedChainMessagesProof, - relayer_id_at_bridged_chain: InboundRelayer, -) -> Runtime::RuntimeCall where - Runtime: pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = InboundRelayer, - >, + relayer_id_at_bridged_chain: InboundRelayerId, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_messages::Config, MPI: 'static, - Runtime::RuntimeCall: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, + Runtime::RuntimeCall: From>, + BridgedChainOf: Chain + Parachain, { pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), - proof: message_proof.into(), + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), } @@ -192,11 +181,7 @@ where Runtime: pallet_bridge_messages::Config, MPI: 'static, Runtime::RuntimeCall: From>, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, + BridgedChainOf: Chain + Parachain, { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: message_delivery_proof, @@ -206,7 +191,12 @@ where } /// Prepare storage proofs of messages, stored at the source chain. -pub fn make_complex_relayer_delivery_proofs( +pub fn make_complex_relayer_delivery_proofs< + BridgedRelayChain, + BridgedParachain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, xcm_message: Xcm, message_nonce: MessageNonce, @@ -226,24 +216,27 @@ pub fn make_complex_relayer_delivery_proofs + ChainWithGrandpa, - MB: MessageBridge, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, + ThisChainWithMessages: ChainWithMessages, { + // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); - let message_size = StorageProofSize::Minimal(message_payload.len() as u32); // prepare para storage proof containing message - let (para_state_root, para_storage_proof) = prepare_messages_storage_proof::( - lane_id, - message_nonce..=message_nonce, - None, - message_size, - message_payload, - encode_all_messages, - encode_lane_data, - ); + let (para_state_root, para_storage_proof) = + prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + UnverifiedStorageProofParams::from_db_size(message_payload.len() as u32), + |_| message_payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = - make_complex_bridged_parachain_heads_proof::( + make_complex_bridged_parachain_heads_proof::( para_state_root, para_header_number, relay_header_number, @@ -270,12 +263,17 @@ where } /// Prepare storage proofs of message confirmations, stored at the target parachain. 
-pub fn make_complex_relayer_confirmation_proofs( +pub fn make_complex_relayer_confirmation_proofs< + BridgedRelayChain, + BridgedParachain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, para_header_number: u32, relay_header_number: u32, bridged_para_id: u32, - relayer_id_at_this_chain: AccountIdOf>, + relayer_id_at_this_chain: AccountIdOf, relayers_state: UnrewardedRelayersState, ) -> ( HeaderOf, @@ -288,28 +286,29 @@ pub fn make_complex_relayer_confirmation_proofs + ChainWithGrandpa, - MB: MessageBridge, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, + ThisChainWithMessages: ChainWithMessages, { // prepare para storage proof containing message delivery proof - let (para_state_root, para_storage_proof) = prepare_message_delivery_storage_proof::( - lane_id, - InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer_id_at_this_chain.into(), - messages: DeliveredMessages::new(1) - }; - relayers_state.unrewarded_relayer_entries as usize - ] - .into(), - last_confirmed_nonce: 1, - }, - StorageProofSize::Minimal(0), - ); + let (para_state_root, para_storage_proof) = + prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain.into(), + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + UnverifiedStorageProofParams::default(), + ); let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = - make_complex_bridged_parachain_heads_proof::( + make_complex_bridged_parachain_heads_proof::( para_state_root, para_header_number, relay_header_number, @@ -334,7 +333,7 @@ where } /// Make bridged parachain header with given state root and relay header that is finalizing it. 
-pub fn make_complex_bridged_parachain_heads_proof( +pub fn make_complex_bridged_parachain_heads_proof( para_state_root: ParaHash, para_header_number: u32, relay_header_number: BlockNumberOf, @@ -350,20 +349,17 @@ pub fn make_complex_bridged_parachain_heads_proof( where BridgedRelayChain: bp_runtime::Chain + ChainWithGrandpa, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, { let bridged_para_head = ParaHead( - bp_test_utils::test_header_with_root::>( + bp_test_utils::test_header_with_root::>( para_header_number.into(), para_state_root, ) .encode(), ); let (relay_state_root, para_heads_proof, parachain_heads) = - prepare_parachain_heads_proof::>(vec![( + prepare_parachain_heads_proof::>(vec![( bridged_para_id, bridged_para_head.clone(), )]); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs index 9285a1e7ad4500a4c2c7db73d9966dd711d852be..ee3fc1ed2c41f1ff2a729dcf784f832a5a563a58 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -39,8 +39,8 @@ pub fn prepare_inbound_xcm( xcm_message: Xcm, destination: InteriorLocation, ) -> Vec { - let location = xcm::VersionedInteriorLocation::V4(destination); - let xcm = xcm::VersionedXcm::::V4(xcm_message); + let location = xcm::VersionedInteriorLocation::from(destination); + let xcm = xcm::VersionedXcm::::from(xcm_message); // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor // or public fields, so just tuple // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index fe4de3114be0d0b92643946f0deafe030a8b88f8..87cf42ba87d836976a8997d90ce99eea9c1f1e9f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -10,88 +10,88 @@ description = "Westend Collectives Parachain Runtime" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } 
-frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-asset-rate = { path = "../../../../../substrate/frame/asset-rate", default-features = false } -pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-treasury = { path = "../../../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false } -pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false } -pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false } -pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = 
"../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-rate = { workspace = true } +pallet-alliance = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-collective = { workspace = true } +pallet-multisig = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-referenda = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-core-fellowship = { workspace = true } +pallet-salary = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } 
-cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace = true } +pallet-collective-content = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] -sp-io = { path = "../../../../../substrate/primitives/io", features = ["std"] } +sp-io = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] @@ -131,7 +131,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -238,7 +238,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs index ceef6de6b7435e453e9622a3d318669e1a3a9307..a052a9d3800cc6880ccee84358883194cb46eb80 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -117,6 +117,7 @@ impl pallet_ranked_collective::Config for Runtime type MinRankOfClass = sp_runtime::traits::Identity; type MemberSwappedHandler = (crate::AmbassadorCore, crate::AmbassadorSalary); 
 type VoteWeight = pallet_ranked_collective::Linear;
+    type MaxMemberCount = ();
     #[cfg(feature = "runtime-benchmarks")]
     type BenchmarkSetup = (crate::AmbassadorCore, crate::AmbassadorSalary);
 }
@@ -219,6 +220,7 @@ impl pallet_core_fellowship::Config for Runtime {
     >;
     type ApproveOrigin = PromoteOrigin;
     type PromoteOrigin = PromoteOrigin;
+    type FastPromoteOrigin = Self::PromoteOrigin;
     type EvidenceSize = ConstU32<65536>;
     type MaxRank = ConstU32<9>;
 }
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
index 6a4a182079671f297d18bc40de0bc557911735cd..942e0c294dd022c7c4dacf4febaf99b6ac686dbf 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
@@ -30,7 +30,7 @@ use frame_support::{
     parameter_types,
     traits::{
         tokens::UnityOrOuterConversion, EitherOf, EitherOfDiverse, FromContains, MapSuccess,
-        NeverEnsureOrigin, OriginTrait, TryWithMorphedArg,
+        OriginTrait, TryWithMorphedArg,
     },
     PalletId,
 };
@@ -55,8 +55,6 @@ use xcm_builder::{AliasesIntoAccountId32, PayOverXcm};
 #[cfg(feature = "runtime-benchmarks")]
 use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure};
-#[cfg(feature = "runtime-benchmarks")]
-use testnet_parachains_constants::westend::currency::DOLLARS;
 /// The Fellowship members' ranks.
 pub mod ranks {
@@ -152,6 +150,7 @@ impl pallet_ranked_collective::Config for Runtime
     type MinRankOfClass = tracks::MinRankOfClass;
     type MemberSwappedHandler = (crate::FellowshipCore, crate::FellowshipSalary);
     type VoteWeight = pallet_ranked_collective::Geometric;
+    type MaxMemberCount = ();
     #[cfg(feature = "runtime-benchmarks")]
     type BenchmarkSetup = (crate::FellowshipCore, crate::FellowshipSalary);
 }
@@ -209,6 +208,7 @@ impl pallet_core_fellowship::Config for Runtime {
         >,
         EnsureCanPromoteTo,
     >;
+    type FastPromoteOrigin = Self::PromoteOrigin;
     type EvidenceSize = ConstU32<65536>;
     type MaxRank = ConstU32<9>;
 }
@@ -270,16 +270,6 @@ parameter_types! {
     pub SelfParaId: ParaId = ParachainInfo::parachain_id();
 }
-#[cfg(feature = "runtime-benchmarks")]
-parameter_types! {
-    // Benchmark bond. Needed to make `propose_spend` work.
-    pub const TenPercent: Permill = Permill::from_percent(10);
-    // Benchmark minimum. Needed to make `propose_spend` work.
-    pub const BenchmarkProposalBondMinimum: Balance = 1 * DOLLARS;
-    // Benchmark maximum. Needed to make `propose_spend` work.
-    pub const BenchmarkProposalBondMaximum: Balance = 10 * DOLLARS;
-}
-
 /// [`PayOverXcm`] setup to pay the Fellowship Treasury.
 pub type FellowshipTreasuryPaymaster = PayOverXcm<
     FellowshipTreasuryInteriorLocation,
@@ -295,28 +285,6 @@ pub type FellowshipTreasuryInstance = pallet_treasury::Instance1;
 impl pallet_treasury::Config for Runtime {
-    // The creation of proposals via the treasury pallet is deprecated and should not be utilized.
-    // Instead, public or fellowship referenda should be used to propose and command the treasury
-    // spend or spend_local dispatchables. The parameters below have been configured accordingly to
-    // discourage its use.
-    // TODO: replace with `NeverEnsure` once polkadot-sdk 1.5 is released.
- type ApproveOrigin = NeverEnsureOrigin<()>; - type OnSlash = (); - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBond = HundredPercent; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMinimum = MaxBalance; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMaximum = MaxBalance; - - #[cfg(feature = "runtime-benchmarks")] - type ProposalBond = TenPercent; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMinimum = BenchmarkProposalBondMinimum; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMaximum = BenchmarkProposalBondMaximum; - // end. - type WeightInfo = weights::pallet_treasury::WeightInfo; type PalletId = FellowshipTreasuryPalletId; type Currency = Balances; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5fce8e5095410b1e07c363c0731fc9b18e9ca076..f37af88c28436cd333333dffc8d451eb177c0f58 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -70,8 +70,8 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, - EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, + fungible::HoldConsideration, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, + InstanceFilter, LinearStoragePrice, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, @@ -104,7 +104,7 @@ use polkadot_runtime_common::{ impls::VersionedLocatableAsset, BlockHashCount, SlowAdjustingFeeUpdate, }; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -122,7 +122,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -163,6 +163,7 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. @@ -180,7 +181,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = ConstU16<0>; + type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -940,7 +941,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WndLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -953,11 +954,11 @@ impl_runtime_apis! 
{ Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -972,7 +973,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -982,6 +983,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs index f40940a8b25faa7c441b1ac9b237cb34e671cf17..6bedfcc7e012383657a059b87942517644057d8a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -58,6 +58,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `AmbassadorCore::Member` (r:1 w:1) /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `AmbassadorCollective::Members` (r:1 w:1) @@ -160,6 +171,20 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(6)) } + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. 
+ Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `AmbassadorCollective::Members` (r:1 w:0) /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `AmbassadorCore::Member` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs index 471ee82ead729ea5abff616f0c9fe3a86704fd91..05014e273f0009bf212969ba8705879747eedd75 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -57,6 +57,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `FellowshipCore::Member` (r:1 w:1) /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `FellowshipCollective::Members` (r:1 w:1) @@ -159,6 +170,20 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(6)) } + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. 
+ Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `FellowshipCollective::Members` (r:1 w:0) /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `FellowshipCore::Member` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs index 58540e646d8c3885bf84da512e2ddc3e42abc80e..5c513c3754ce84605465aa48ac43e7731e798d2a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs @@ -62,43 +62,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `FellowshipTreasury::ProposalCount` (r:1 w:1) - /// Proof: `FellowshipTreasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `FellowshipTreasury::Proposals` (r:0 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 264_000_000 picoseconds. - Weight::from_parts(277_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipTreasury::Proposals` (r:1 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 289_000_000 picoseconds. - Weight::from_parts(312_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index 561e8276b5f0543001e10fd21345ea5d3a65fee5..d54f1e7db6c167480858758bf56ed07941fb06c2 100644 --- a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -13,20 +13,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -smallvec = "1.11.0" +smallvec = { workspace = true, default-features = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../../polkadot/core-primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../polkadot/runtime/rococo/constants", default-features = false, optional = true } -westend-runtime-constants = { path = "../../../../polkadot/runtime/westend/constants", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +polkadot-core-primitives = { workspace = true } +rococo-runtime-constants = { optional = true, workspace = true } +westend-runtime-constants = { optional = true, workspace = true } +xcm = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index e43a69482c79f1d67e30d6c65ea67840a7893816..4fb4bcde02351b9485dc2a5ccf6b48cc729633e7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -13,74 +13,74 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = 
"../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } 
+frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-insecure-randomness-collective-flip = { workspace = true } +pallet-balances = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +pallet-sudo = { workspace = true } +pallet-contracts = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { 
workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } [features] default = ["std"] @@ -141,7 +141,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -171,7 +171,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 2d346e66c6c3b3f4a7e72072875646d9095a821a..d2fe0689f5155ece3c3f1397aa18e0a33583e5d6 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -50,7 +50,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; @@ -64,7 +64,7 @@ pub use parachains_common::{AuraId, Balance}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm::prelude::*; use xcm_config::CollatorSelectionUpdateOrigin; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -142,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, @@ -177,6 +177,7 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. @@ -193,7 +194,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = frame_system::weights::SubstrateWeight; - type SS58Prefix = ConstU16<42>; + type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; } @@ -590,7 +591,7 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -603,11 +604,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -622,7 +623,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -632,6 +633,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index dc99fe331f78671b0b43842e0762db8bd96a840b..57a0782b1ef665b376d86ea8cfa750e3b0b4f007 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -10,74 +10,75 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features 
= false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-broker = { path = "../../../../../substrate/frame/broker", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } 
+pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, 
features = ["rococo"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } [features] default = ["std"] @@ -94,6 +95,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -139,7 +141,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -169,7 +171,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ @@ -199,4 +201,14 @@ try-runtime = [ "sp-runtime/try-runtime", ] -fast-runtime = [] +fast-runtime = [ + "rococo-runtime-constants/fast-runtime", +] + +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs index 28dacd20cf305ebdbc57eb2a30e3c98e4f8853d9..368a1e427aaafe9e45ad4370dbbaba16f5a31b06 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs @@ -13,20 +13,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .build(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) .build(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() + .enable_metadata_hash("ROC", 12) .build(); } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index ec3a4f31202fd5f5333a1057bb06ca5dde247619..fa0c2644421e7ebc3f34a562e6aa2ea0b255df12 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -21,22 +21,65 @@ use cumulus_primitives_core::relay_chain; use frame_support::{ parameter_types, traits::{ - fungible::{Balanced, Credit}, - OnUnbalanced, + fungible::{Balanced, Credit, Inspect}, + tokens::{Fortitude, Preservation}, + DefensiveResult, OnUnbalanced, }, }; +use frame_system::Pallet as System; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf}; -use parachains_common::{AccountId, Balance, BlockNumber}; +use parachains_common::{AccountId, Balance}; +use rococo_runtime_constants::system_parachain::coretime; +use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; +use xcm_executor::traits::TransactAsset; -pub struct CreditToCollatorPot; -impl OnUnbalanced> for CreditToCollatorPot { - fn on_nonzero_unbalanced(credit: Credit) { - let staking_pot = CollatorSelection::account_id(); - let _ = >::resolve(&staking_pot, credit); +pub struct BurnCoretimeRevenue; +impl OnUnbalanced> for BurnCoretimeRevenue { + fn on_nonzero_unbalanced(amount: Credit) { + let acc = RevenueAccumulationAccount::get(); + if !System::::account_exists(&acc) { + System::::inc_providers(&acc); + } + Balances::resolve(&acc, amount).defensive_ok(); } } +type AssetTransactor = ::AssetTransactor; + +fn burn_at_relay(stash: &AccountId, value: Balance) -> Result<(), XcmError> { + let dest = Location::parent(); + let stash_location = + Junction::AccountId32 { network: None, id: stash.clone().into() }.into_location(); + let asset = Asset { id: AssetId(Location::parent()), fun: Fungible(value) }; + let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; + + let withdrawn = AssetTransactor::withdraw_asset(&asset, &stash_location, None)?; + + AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?; + + let parent_assets = Into::::into(withdrawn) + .reanchored(&dest, &Here.into()) + .defensive_map_err(|_| XcmError::ReanchorFailed)?; + + PolkadotXcm::send_xcm( + Here, + Location::parent(), + Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + ReceiveTeleportedAsset(parent_assets.clone()), + BurnAsset(parent_assets), + ]), + )?; + + AssetTransactor::check_out(&dest, &asset, 
&dummy_xcm_context); + + Ok(()) +} + /// A type containing the encoding of the coretime pallet in the Relay chain runtime. Used to /// construct any remote calls. The codec index must correspond to the index of `Coretime` in the /// `construct_runtime` of the Relay chain. @@ -66,11 +109,7 @@ enum CoretimeProviderCalls { parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); -} - -parameter_types! { - pub storage CoreCount: Option = None; - pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; + pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash"); } /// Type that implements the `CoretimeInterface` for the allocation of Coretime. Meant to operate @@ -205,26 +244,30 @@ impl CoretimeInterface for CoretimeAllocator { } } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - let revenue = CoretimeRevenue::get(); - CoretimeRevenue::set(&None); - revenue - } + fn on_new_timeslice(_t: pallet_broker::Timeslice) { + let stash = RevenueAccumulationAccount::get(); + let value = + Balances::reducible_balance(&stash, Preservation::Expendable, Fortitude::Polite); - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { - CoretimeRevenue::set(&Some((when, revenue))); + if value > 0 { + log::debug!(target: "runtime::coretime", "Going to burn {value} stashed tokens at RC"); + match burn_at_relay(&stash, value) { + Ok(()) => { + log::debug!(target: "runtime::coretime", "Succesfully burnt {value} tokens"); + }, + Err(err) => { + log::error!(target: "runtime::coretime", "burn_at_relay failed: {err:?}"); + }, + } + } } } impl pallet_broker::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnRevenue = CreditToCollatorPot; - #[cfg(feature = "fast-runtime")] - type TimeslicePeriod = ConstU32<10>; - #[cfg(not(feature = "fast-runtime"))] - type TimeslicePeriod = ConstU32<80>; + type OnRevenue = BurnCoretimeRevenue; + type TimeslicePeriod = ConstU32<{ coretime::TIMESLICE_PERIOD }>; type MaxLeasedCores = ConstU32<50>; type MaxReservedCores = ConstU32<10>; type Coretime = CoretimeAllocator; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index b3eaf3d127a2f6f6acb8b161f1fcbd8bdb479d36..6e36539c7bf79a9ef31b991c432473b90d724185 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -76,7 +76,7 @@ use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, RocRelayLocation, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -104,6 +104,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
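Editor's note: the `metadata-hash` wiring above has two halves. The build-script hunks switch `substrate-wasm-builder` to its defaults-based API and only call `enable_metadata_hash` when the `metadata-hash` feature is active, while the runtime adds `frame_metadata_hash_extension::CheckMetadataHash` to `SignedExtra` so signed extrinsics can commit to that hash. Reassembled from the hunks above (token symbol `"ROC"`, 12 decimals; the coretime-westend build script later in this patch is identical apart from using `"WND"`), the resulting build.rs reads roughly as follows — this is a readability sketch of the patched file, not additional code:

```rust
// Without `metadata-hash`: build the normal runtime plus a `fast-runtime`
// flavoured binary used for testing, both via the defaults-based builder API.
#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
fn main() {
    substrate_wasm_builder::WasmBuilder::build_using_defaults();

    substrate_wasm_builder::WasmBuilder::init_with_defaults()
        .set_file_name("fast_runtime_binary.rs")
        .enable_feature("fast-runtime")
        .build();
}

// With `metadata-hash` enabled: the same two binaries, but with the metadata
// hash for ROC (12 decimals) baked into the wasm.
#[cfg(all(feature = "metadata-hash", feature = "std"))]
fn main() {
    substrate_wasm_builder::WasmBuilder::init_with_defaults()
        .enable_metadata_hash("ROC", 12)
        .build();

    substrate_wasm_builder::WasmBuilder::init_with_defaults()
        .set_file_name("fast_runtime_binary.rs")
        .enable_feature("fast-runtime")
        .enable_metadata_hash("ROC", 12)
        .build();
}
```

The `on-chain-release-build` feature added in the Cargo.toml hunks simply turns on `metadata-hash` together with `sp-api/disable-logging`, so release wasm builds take the second path.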
@@ -117,6 +118,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, + pallet_broker::migration::MigrateV2ToV3, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -142,10 +144,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, state_version: 1, }; @@ -660,7 +662,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RocRelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -673,11 +675,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -692,7 +694,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -702,6 +704,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 5c9175a18d98a0b6c0c791945dcd9f2cd1892cc4..83e80e2e91e737827fce0b90fad0ec3d70ce3c33 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_092_000, 0) + // Minimum execution time: 2_024_000 picoseconds. + Weight::from_parts(2_121_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_943_000 picoseconds. - Weight::from_parts(22_570_000, 0) + // Minimum execution time: 21_654_000 picoseconds. + Weight::from_parts(22_591_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_923_000 picoseconds. - Weight::from_parts(21_354_000, 0) + // Minimum execution time: 20_769_000 picoseconds. + Weight::from_parts(21_328_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,24 +93,34 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 10_687_000 picoseconds. - Weight::from_parts(11_409_000, 0) + // Minimum execution time: 10_404_000 picoseconds. 
+ Weight::from_parts(10_941_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::LastRelayChainBlockNumber` (r:1 w:0) /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -120,33 +130,34 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `n` is `[0, 1000]`. fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12567` - // Estimated: `14052` - // Minimum execution time: 111_288_000 picoseconds. - Weight::from_parts(117_804_282, 0) - .saturating_add(Weight::from_parts(0, 14052)) - // Standard Error: 391 - .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(66)) + // Measured: `12599` + // Estimated: `15065 + n * (1 ยฑ0)` + // Minimum execution time: 44_085_000 picoseconds. 
+ Weight::from_parts(127_668_002, 0) + .saturating_add(Weight::from_parts(0, 15065)) + // Standard Error: 2_231 + .saturating_add(Weight::from_parts(20_604, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(59)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `316` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 33_006_000 picoseconds. - Weight::from_parts(34_256_000, 0) + // Minimum execution time: 45_100_000 picoseconds. + Weight::from_parts(46_263_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -156,53 +167,53 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `553` // Estimated: `4698` - // Minimum execution time: 61_473_000 picoseconds. - Weight::from_parts(66_476_000, 0) + // Minimum execution time: 65_944_000 picoseconds. + Weight::from_parts(68_666_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 13_771_000 picoseconds. - Weight::from_parts(14_374_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 13_794_000 picoseconds. 
+ Weight::from_parts(14_450_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 15_162_000 picoseconds. - Weight::from_parts(15_742_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 15_316_000 picoseconds. + Weight::from_parts(15_787_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 16_196_000 picoseconds. - Weight::from_parts(16_796_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 16_375_000 picoseconds. + Weight::from_parts(17_113_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,15 +222,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `937` // Estimated: `4681` - // Minimum execution time: 25_653_000 picoseconds. - Weight::from_parts(27_006_000, 0) + // Minimum execution time: 25_952_000 picoseconds. 
+ Weight::from_parts(27_198_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -227,7 +238,7 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -236,10 +247,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `1002` + // Measured: `1003` // Estimated: `5996` - // Minimum execution time: 31_114_000 picoseconds. - Weight::from_parts(32_235_000, 0) + // Minimum execution time: 31_790_000 picoseconds. + Weight::from_parts(32_920_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -255,11 +266,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 57_280_000 picoseconds. - Weight::from_parts(58_127_480, 0) + // Minimum execution time: 56_286_000 picoseconds. + Weight::from_parts(56_946_240, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 41_670 - .saturating_add(Weight::from_parts(1_203_066, 0).saturating_mul(m.into())) + // Standard Error: 44_472 + .saturating_add(Weight::from_parts(1_684_838, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -279,25 +290,25 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3680` - // Minimum execution time: 59_968_000 picoseconds. - Weight::from_parts(62_315_000, 0) - .saturating_add(Weight::from_parts(0, 3680)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 64_967_000 picoseconds. + Weight::from_parts(66_504_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `3550` - // Minimum execution time: 50_887_000 picoseconds. 
- Weight::from_parts(57_366_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `466` + // Estimated: `3551` + // Minimum execution time: 37_552_000 picoseconds. + Weight::from_parts(46_263_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -311,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 84_472_000 picoseconds. - Weight::from_parts(96_536_000, 0) + // Minimum execution time: 79_625_000 picoseconds. + Weight::from_parts(86_227_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -329,8 +340,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 96_371_000 picoseconds. - Weight::from_parts(104_659_000, 0) + // Minimum execution time: 88_005_000 picoseconds. + Weight::from_parts(92_984_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -343,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 51_741_000 picoseconds. - Weight::from_parts(54_461_000, 0) + // Minimum execution time: 38_877_000 picoseconds. + Weight::from_parts(40_408_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -360,13 +371,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(21_028_116, 0) + // Minimum execution time: 20_581_000 picoseconds. + Weight::from_parts(21_610_297, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 119 + .saturating_add(Weight::from_parts(144, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -377,29 +390,29 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_987_000 picoseconds. - Weight::from_parts(6_412_478, 0) + // Minimum execution time: 6_079_000 picoseconds. 
+ Weight::from_parts(6_540_110, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 16 - .saturating_add(Weight::from_parts(47, 0).saturating_mul(n.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(10, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 38_623_000 picoseconds. - Weight::from_parts(39_773_000, 0) + // Minimum execution time: 42_947_000 picoseconds. + Weight::from_parts(43_767_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -412,13 +425,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:60) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { + fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12514` // Estimated: `13506` - // Minimum execution time: 97_074_000 picoseconds. - Weight::from_parts(101_247_740, 0) + // Minimum execution time: 93_426_000 picoseconds. + Weight::from_parts(96_185_447, 0) .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 116 + .saturating_add(Weight::from_parts(4, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(65)) } @@ -430,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_317_000 picoseconds. - Weight::from_parts(6_521_000, 0) + // Minimum execution time: 5_842_000 picoseconds. + Weight::from_parts(6_077_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -454,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 32_575_000 picoseconds. - Weight::from_parts(33_299_000, 0) + // Minimum execution time: 33_278_000 picoseconds. 
+ Weight::from_parts(34_076_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -474,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_256_000 picoseconds. - Weight::from_parts(15_927_000, 0) + // Minimum execution time: 15_779_000 picoseconds. + Weight::from_parts(16_213_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -486,8 +501,19 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_783_000 picoseconds. - Weight::from_parts(1_904_000, 0) + // Minimum execution time: 1_774_000 picoseconds. + Weight::from_parts(1_873_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_858_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -497,19 +523,19 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3863` - // Minimum execution time: 12_307_000 picoseconds. - Weight::from_parts(12_967_000, 0) - .saturating_add(Weight::from_parts(0, 3863)) + // Measured: `408` + // Estimated: `1893` + // Minimum execution time: 10_874_000 picoseconds. + Weight::from_parts(11_265_000, 0) + .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) @@ -517,10 +543,32 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `470` // Estimated: `1886` - // Minimum execution time: 6_597_000 picoseconds. - Weight::from_parts(6_969_000, 0) + // Minimum execution time: 6_525_000 picoseconds. 
+ Weight::from_parts(6_769_000, 0) .saturating_add(Weight::from_parts(0, 1886)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 45_561_000 picoseconds. + Weight::from_parts(47_306_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 78018537f5d3ce07d75ed8a465d5e17dba157028..d3bf6b43a7ed7453e3c12dc8ecdafeee787e04b5 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -10,74 +10,75 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = 
"../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-broker = { path = "../../../../../substrate/frame/broker", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } 
+sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace 
= true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [features] default = ["std"] @@ -94,6 +95,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -138,7 +140,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -167,7 +169,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ @@ -196,4 +198,14 @@ try-runtime = [ "sp-runtime/try-runtime", ] -fast-runtime = [] +fast-runtime = [ + "westend-runtime-constants/fast-runtime", +] + +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs index 28dacd20cf305ebdbc57eb2a30e3c98e4f8853d9..2f10a39d1b2e238e29ab8a726c808c713914a0b7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs @@ -13,20 +13,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .build(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) .build(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() + .enable_metadata_hash("WND", 12) .build(); } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index a5e219b9897e0710e008ffa6b800624710877bcc..4f06e3e3669c8d69fcb990e55eb7c9e83432e96f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -21,22 +21,67 @@ use cumulus_primitives_core::relay_chain; use frame_support::{ parameter_types, traits::{ - fungible::{Balanced, Credit}, - OnUnbalanced, + fungible::{Balanced, Credit, Inspect}, + tokens::{Fortitude, Preservation}, + DefensiveResult, OnUnbalanced, }, }; -use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf}; -use parachains_common::{AccountId, Balance, BlockNumber}; +use frame_system::Pallet as System; +use pallet_broker::{ + CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf, Timeslice, +}; +use parachains_common::{AccountId, Balance}; +use sp_runtime::traits::AccountIdConversion; +use westend_runtime_constants::system_parachain::coretime; use xcm::latest::prelude::*; +use xcm_executor::traits::TransactAsset; -pub struct CreditToCollatorPot; -impl OnUnbalanced> for CreditToCollatorPot { - fn on_nonzero_unbalanced(credit: Credit) { - let staking_pot = CollatorSelection::account_id(); - let _ = >::resolve(&staking_pot, credit); +pub struct BurnCoretimeRevenue; +impl OnUnbalanced> for BurnCoretimeRevenue { + fn on_nonzero_unbalanced(amount: Credit) { + let acc = RevenueAccumulationAccount::get(); + if !System::::account_exists(&acc) { + System::::inc_providers(&acc); + } + Balances::resolve(&acc, amount).defensive_ok(); } } +type AssetTransactor = ::AssetTransactor; + +fn burn_at_relay(stash: &AccountId, value: Balance) -> Result<(), XcmError> { + let dest = Location::parent(); + let stash_location = + Junction::AccountId32 { network: None, id: stash.clone().into() }.into_location(); + let asset = Asset { id: AssetId(Location::parent()), fun: Fungible(value) }; + let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; + + let withdrawn = AssetTransactor::withdraw_asset(&asset, &stash_location, None)?; + + AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?; + + let parent_assets = Into::::into(withdrawn) + .reanchored(&dest, &Here.into()) + .defensive_map_err(|_| XcmError::ReanchorFailed)?; + + PolkadotXcm::send_xcm( + Here, + Location::parent(), + Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + 
ReceiveTeleportedAsset(parent_assets.clone()), + BurnAsset(parent_assets), + ]), + )?; + + AssetTransactor::check_out(&dest, &asset, &dummy_xcm_context); + + Ok(()) +} + /// A type containing the encoding of the coretime pallet in the Relay chain runtime. Used to /// construct any remote calls. The codec index must correspond to the index of `Coretime` in the /// `construct_runtime` of the Relay chain. @@ -66,11 +111,7 @@ enum CoretimeProviderCalls { parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); -} - -parameter_types! { - pub storage CoreCount: Option = None; - pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; + pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash"); } /// Type that implements the `CoretimeInterface` for the allocation of Coretime. Meant to operate @@ -217,26 +258,30 @@ impl CoretimeInterface for CoretimeAllocator { } } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - let revenue = CoretimeRevenue::get(); - CoretimeRevenue::set(&None); - revenue - } + fn on_new_timeslice(_timeslice: Timeslice) { + let stash = RevenueAccumulationAccount::get(); + let value = + Balances::reducible_balance(&stash, Preservation::Expendable, Fortitude::Polite); - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { - CoretimeRevenue::set(&Some((when, revenue))); + if value > 0 { + log::debug!(target: "runtime::coretime", "Going to burn {value} stashed tokens at RC"); + match burn_at_relay(&stash, value) { + Ok(()) => { + log::debug!(target: "runtime::coretime", "Succesfully burnt {value} tokens"); + }, + Err(err) => { + log::error!(target: "runtime::coretime", "burn_at_relay failed: {err:?}"); + }, + } + } } } impl pallet_broker::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnRevenue = CreditToCollatorPot; - #[cfg(feature = "fast-runtime")] - type TimeslicePeriod = ConstU32<10>; - #[cfg(not(feature = "fast-runtime"))] - type TimeslicePeriod = ConstU32<80>; + type OnRevenue = BurnCoretimeRevenue; + type TimeslicePeriod = ConstU32<{ coretime::TIMESLICE_PERIOD }>; // We don't actually need any leases at launch but set to 10 in case we want to sudo some in. type MaxLeasedCores = ConstU32<10>; type MaxReservedCores = ConstU32<10>; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 6c22702ce872fe28e5acc9783c263bedbc135dee..74fdd971f5ce05ba87ec3c1cbf2ac3d233a4d73b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -76,7 +76,7 @@ use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, TokenRelayLocation, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -104,6 +104,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. 
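Editor's note: the coretime-westend hunks above mirror the Rococo change. Coretime sales revenue is no longer credited to the collator pot; `BurnCoretimeRevenue` accumulates it on a broker-pallet sub-account, and `on_new_timeslice` drains that stash via `burn_at_relay`, which withdraws the balance through the XCM `AssetTransactor`, reanchors it to the Relay chain, and sends an XCM that teleports and `BurnAsset`s it there. A minimal standalone sketch of how the stash account is derived (same `PalletId` and seed as `RevenueAccumulationAccount` in the hunks above; assumes the `frame-support` and `sp-runtime` crates are available as dependencies):

```rust
// Sketch only: reproduces the `RevenueAccumulationAccount` derivation used by
// BurnCoretimeRevenue in the patch above.
use frame_support::PalletId;
use sp_runtime::{traits::AccountIdConversion, AccountId32};

fn main() {
    // Same pallet id as `BrokerPalletId` in the runtime ("py/broke").
    let broker = PalletId(*b"py/broke");

    // Sub-account derived from the pallet id plus the b"burnstash" seed,
    // truncated/padded to fit a 32-byte account id. This is the account the
    // OnUnbalanced handler credits and `on_new_timeslice` later drains.
    let stash: AccountId32 = broker.into_sub_account_truncating(b"burnstash");

    println!("revenue accumulation account: {stash:?}");
}
```

Because the derivation is purely deterministic, both runtimes end up with the same stash address, and the account is given a provider reference before the first credit so it is never reaped while revenue is pending.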
@@ -116,6 +117,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, + pallet_broker::migration::MigrateV2ToV3, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -141,10 +143,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, state_version: 1, }; @@ -651,7 +653,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenRelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -664,11 +666,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -683,7 +685,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -693,6 +695,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 7e1c832a90924e39c7bc7d7b24d8163ce5d65589..d130b306f7a52b75958db353aea24e9e113c82d5 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_897_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 1_899_000 picoseconds. + Weight::from_parts(2_051_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 22_550_000 picoseconds. - Weight::from_parts(22_871_000, 0) + // Minimum execution time: 21_965_000 picoseconds. + Weight::from_parts(22_774_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 21_170_000 picoseconds. - Weight::from_parts(21_645_000, 0) + // Minimum execution time: 20_748_000 picoseconds. + Weight::from_parts(21_464_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,24 +93,34 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_494_000 picoseconds. - Weight::from_parts(10_942_000, 0) + // Minimum execution time: 10_269_000 picoseconds. 
+ Weight::from_parts(10_508_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::LastRelayChainBlockNumber` (r:1 w:0) /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -118,15 +128,18 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(_n: u32, ) -> Weight { + fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12247` - // Estimated: `13732` - // Minimum execution time: 61_014_000 picoseconds. - Weight::from_parts(63_267_651, 0) - .saturating_add(Weight::from_parts(0, 13732)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `12279` + // Estimated: `14805 + n * (1 ±0)` + // Minimum execution time: 41_900_000 picoseconds.
+ Weight::from_parts(80_392_728, 0) + .saturating_add(Weight::from_parts(0, 14805)) + // Standard Error: 870 + .saturating_add(Weight::from_parts(4_361, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(26)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -135,13 +148,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `316` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 30_931_000 picoseconds. - Weight::from_parts(31_941_000, 0) + // Minimum execution time: 40_911_000 picoseconds. + Weight::from_parts(43_102_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -160,47 +173,47 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `450` // Estimated: `4698` - // Minimum execution time: 57_466_000 picoseconds. - Weight::from_parts(65_042_000, 0) + // Minimum execution time: 70_257_000 picoseconds. + Weight::from_parts(73_889_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 12_799_000 picoseconds. - Weight::from_parts(13_401_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 13_302_000 picoseconds. + Weight::from_parts(13_852_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 14_107_000 picoseconds. - Weight::from_parts(14_630_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 14_927_000 picoseconds. 
+ Weight::from_parts(15_553_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 15_254_000 picoseconds. - Weight::from_parts(16_062_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 16_237_000 picoseconds. + Weight::from_parts(16_995_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -209,15 +222,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `735` + // Measured: `736` // Estimated: `4681` - // Minimum execution time: 23_557_000 picoseconds. - Weight::from_parts(24_382_000, 0) + // Minimum execution time: 24_621_000 picoseconds. + Weight::from_parts(25_165_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -225,7 +238,7 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -234,10 +247,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `801` + // Measured: `802` // Estimated: `5996` - // Minimum execution time: 29_371_000 picoseconds. - Weight::from_parts(30_200_000, 0) + // Minimum execution time: 29_832_000 picoseconds. + Weight::from_parts(30_894_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -253,11 +266,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 54_331_000 picoseconds.
- Weight::from_parts(55_322_165, 0) + // Minimum execution time: 55_390_000 picoseconds. + Weight::from_parts(56_124_789, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_225 - .saturating_add(Weight::from_parts(1_099_614, 0).saturating_mul(m.into())) + // Standard Error: 41_724 + .saturating_add(Weight::from_parts(1_551_266, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -277,25 +290,25 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3680` - // Minimum execution time: 53_789_000 picoseconds. - Weight::from_parts(55_439_000, 0) - .saturating_add(Weight::from_parts(0, 3680)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 59_759_000 picoseconds. + Weight::from_parts(61_310_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `3550` - // Minimum execution time: 43_941_000 picoseconds. - Weight::from_parts(49_776_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `466` + // Estimated: `3551` + // Minimum execution time: 37_007_000 picoseconds. + Weight::from_parts(51_927_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -309,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 64_917_000 picoseconds. - Weight::from_parts(70_403_000, 0) + // Minimum execution time: 86_563_000 picoseconds. + Weight::from_parts(91_274_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -327,8 +340,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 72_633_000 picoseconds. - Weight::from_parts(79_305_000, 0) + // Minimum execution time: 93_655_000 picoseconds. + Weight::from_parts(98_160_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -341,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 36_643_000 picoseconds. - Weight::from_parts(48_218_000, 0) + // Minimum execution time: 33_985_000 picoseconds. 
+ Weight::from_parts(43_618_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -358,13 +371,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_617_000 picoseconds. - Weight::from_parts(18_904_788, 0) + // Minimum execution time: 18_778_000 picoseconds. + Weight::from_parts(19_543_425, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 41 + .saturating_add(Weight::from_parts(33, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -375,26 +390,26 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_575_000 picoseconds. - Weight::from_parts(5_887_598, 0) + // Minimum execution time: 5_505_000 picoseconds. + Weight::from_parts(5_982_015, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 16 - .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into())) + // Standard Error: 13 + .saturating_add(Weight::from_parts(44, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 36_415_000 picoseconds. - Weight::from_parts(37_588_000, 0) + // Minimum execution time: 38_128_000 picoseconds. + Weight::from_parts(40_979_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,11 +429,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12194` // Estimated: `13506` - // Minimum execution time: 48_362_000 picoseconds. - Weight::from_parts(49_616_106, 0) + // Minimum execution time: 49_041_000 picoseconds. 
+ Weight::from_parts(50_522_788, 0) .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 61 - .saturating_add(Weight::from_parts(59, 0).saturating_mul(n.into())) + // Standard Error: 72 + .saturating_add(Weight::from_parts(78, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(25)) } @@ -430,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_148_000 picoseconds. - Weight::from_parts(6_374_000, 0) + // Minimum execution time: 5_903_000 picoseconds. + Weight::from_parts(6_202_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -454,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 30_267_000 picoseconds. - Weight::from_parts(30_825_000, 0) + // Minimum execution time: 31_412_000 picoseconds. + Weight::from_parts(31_964_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -474,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 13_491_000 picoseconds. - Weight::from_parts(13_949_000, 0) + // Minimum execution time: 14_098_000 picoseconds. + Weight::from_parts(14_554_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -486,8 +501,19 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_711_000 picoseconds. - Weight::from_parts(1_913_000, 0) + // Minimum execution time: 1_723_000 picoseconds. + Weight::from_parts(1_822_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_865_000 picoseconds. 
+ Weight::from_parts(1_983_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -497,19 +523,19 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3863` - // Minimum execution time: 12_035_000 picoseconds. - Weight::from_parts(12_383_000, 0) - .saturating_add(Weight::from_parts(0, 3863)) + // Measured: `408` + // Estimated: `1893` + // Minimum execution time: 10_387_000 picoseconds. + Weight::from_parts(10_819_000, 0) + .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) @@ -517,10 +543,21 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `150` // Estimated: `1566` - // Minimum execution time: 6_142_000 picoseconds. - Weight::from_parts(6_538_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_278_000, 0) .saturating_add(Weight::from_parts(0, 1566)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 2_187_000 picoseconds. + Weight::from_parts(2_372_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 92a5bbbd1376088909f315371dff6be13ffa69af..c201c8375be0d900d3797b11d2597e120eef873c 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -10,54 +10,54 @@ description = "Glutton parachain runtime." 
workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-glutton = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } 
+sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +pallet-message-queue = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-timestamp = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 4092fb78594d205d8ad9ddf7d8268c4fa4db27db..a204bb7276cfe3b58e5d4b9c9737eeaa5a018128 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -296,6 +296,7 @@ pub type SignedExtra = ( frame_system::CheckGenesis, frame_system::CheckEra, frame_system::CheckNonce, + frame_system::CheckWeight, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index d4e65da3cd6426f7a21a2c96d03d53d8161410fb..890de672e0b5ed74a11435a28dc9f81bbde9297b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -7,74 +7,74 @@ description = "Rococo's People parachain runtime" license = "Apache-2.0" [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.7.7" } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +enumflags2 = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-identity = { path = "../../../../../substrate/frame/identity", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", 
default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common 
= { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } [features] default = ["std"] @@ -136,7 +136,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -165,7 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index c80f6879fb3448b7b20dd81c387d3ce6c3a3ea90..ff31aba8a27716120f455414058d1aa6e17c95e6 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -70,7 +70,7 @@ use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: 
create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -625,7 +625,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -638,11 +638,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -657,7 +657,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -667,6 +667,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index b040613d19e75a51d3008af0f71aa7953e82be29..83068e489d2ebb3919d20ac7747d5236ceb5aeb7 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -7,74 +7,74 @@ description = "Westend's People parachain runtime" license = "Apache-2.0" [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.7.7" } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +enumflags2 = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = 
"../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-identity = { path = "../../../../../substrate/frame/identity", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = 
true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = 
"../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [features] default = ["std"] @@ -136,7 +136,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -165,7 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 06c938b8a40c31ac31c2557f480017ba385e157b..6adaa4b4e50201ae1a073d25a4021ec8e84bfa8a 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -70,7 +70,7 @@ use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -132,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -625,7 +625,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -638,11 +638,11 @@ impl_runtime_apis! 
{ Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -657,7 +657,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -667,6 +667,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 910944f54a5ff3433f11fab1d33aa0e88abc35b2..8a7c5922362e888723826a04d31b941264a0e5b8 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -10,41 +10,41 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", 
default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-solo-to-para = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-timestamp = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 7a7fad537ac302a9e71889c949a04aacc79df3f3..4a1271ca658406b2f4d6c100a70e1ca7b46ef8eb 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -10,45 +10,45 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false 
} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +pallet-message-queue = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } +cumulus-pallet-aura-ext = { 
workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-core = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index c081bac4babe87413c40869917715d03c0c71a86..5e895271ab17f8d9d4cf13cded7f1ebc9716000e 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -10,41 +10,41 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false } -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } -pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } -cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } -cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +cumulus-primitives-core = { workspace = true } 
+cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" +hex-literal = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 3262233053e7e130c230adbc0aef857572756b1d..bdd0dfac6065759d10108a7961dbaf987e0ebb16 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -15,70 +15,70 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -smallvec = "1.11.0" +scale-info = { features = ["derive"], workspace = true } +smallvec = { workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", 
default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = 
"../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +polkadot-primitives = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../../assets/common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +assets-common = { workspace = true } [features] default = ["std"] @@ -135,7 +135,7 @@ std = [ "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -166,7 +166,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index e77416e6cd5b681a83b3d8b0571401ef1543004b..8d03f8332764f4a1a478ab4bef9c3dfd663e7169 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -85,7 +85,7 @@ use xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ 
dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -847,7 +847,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi<Block> for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -860,11 +860,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -879,7 +879,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { use xcm_builder::InspectMessageQueues; use xcm_executor::RecordXcm; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index cf734345a976f027ae31f2f38735b9b7aac59f7d..7cbb614babe7988831db0898e6b08242cc66efe0 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -10,60 +10,60 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path =
"../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-assets = { workspace = true } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +polkadot-runtime-common = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = 
"../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-ping = { path = "../../../pallets/ping", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-ping = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } +parachain-info = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index fd4716ab972e8e8f6d3d1a3ca6aca74df7263fa7..40f2b78ffd6d5201ff4f8b2c04016f3f0261f71a 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 639b8b3d4dcf18f8d9601d0873f99b0278939f2d..7085211dad26aa7a916f9a71f9afab79793688c0 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -15,113 +15,113 @@ name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.79" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.28" -hex-literal = "0.4.1" +async-trait = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +color-print = { workspace = true } +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } # 
Local -rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } -shell-runtime = { path = "../parachains/runtimes/starters/shell" } -glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } -seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } -asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } -asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } -collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } -contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } -bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } -coretime-rococo-runtime = { path = "../parachains/runtimes/coretime/coretime-rococo" } -coretime-westend-runtime = { path = "../parachains/runtimes/coretime/coretime-westend" } -bridge-hub-westend-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-westend" } -penpal-runtime = { path = "../parachains/runtimes/testing/penpal" } -jsonrpsee = { version = "0.22", features = ["server"] } -people-rococo-runtime = { path = "../parachains/runtimes/people/people-rococo" } -people-westend-runtime = { path = "../parachains/runtimes/people/people-westend" } -parachains-common = { path = "../parachains/common" } -testnet-parachains-constants = { path = "../parachains/runtimes/constants", default-features = false, features = [ +rococo-parachain-runtime = { workspace = true } +shell-runtime = { workspace = true } +glutton-westend-runtime = { workspace = true } +seedling-runtime = { workspace = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } +asset-hub-westend-runtime = { workspace = true } +collectives-westend-runtime = { workspace = true } +contracts-rococo-runtime = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +coretime-rococo-runtime = { workspace = true } +coretime-westend-runtime = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +penpal-runtime = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } +people-rococo-runtime = { workspace = true } +people-westend-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } +testnet-parachains-constants = { features = [ "rococo", "westend", -] } +], workspace = true } # Substrate -frame-benchmarking = { path = "../../substrate/frame/benchmarking" } -frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli" } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../substrate/primitives/io" } -sp-core = { path = "../../substrate/primitives/core" } -sp-session = { path = "../../substrate/primitives/session" } -frame-try-runtime = { path = "../../substrate/frame/try-runtime", optional = true } -sc-consensus = { path = "../../substrate/client/consensus/common" } -sp-tracing = { path = "../../substrate/primitives/tracing" } -frame-support = { path = "../../substrate/frame/support" } -sc-cli = { path = "../../substrate/client/cli" } -sc-client-api = { path = "../../substrate/client/api" } -sc-executor = { path = "../../substrate/client/executor" } -sc-service = { path = "../../substrate/client/service" } -sc-telemetry = { path = "../../substrate/client/telemetry" } -sc-transaction-pool = { path = 
"../../substrate/client/transaction-pool" } -sp-transaction-pool = { path = "../../substrate/primitives/transaction-pool" } -sc-network = { path = "../../substrate/client/network" } -sc-network-sync = { path = "../../substrate/client/network/sync" } -sc-basic-authorship = { path = "../../substrate/client/basic-authorship" } -sp-timestamp = { path = "../../substrate/primitives/timestamp" } -sp-blockchain = { path = "../../substrate/primitives/blockchain" } -sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder", default-features = false } -sp-block-builder = { path = "../../substrate/primitives/block-builder" } -sp-keystore = { path = "../../substrate/primitives/keystore" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } -sc-rpc = { path = "../../substrate/client/rpc" } -sp-version = { path = "../../substrate/primitives/version" } -sc-tracing = { path = "../../substrate/client/tracing" } -sp-offchain = { path = "../../substrate/primitives/offchain" } -frame-system-rpc-runtime-api = { path = "../../substrate/frame/system/rpc/runtime-api" } -pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../substrate/frame/transaction-payment/rpc/runtime-api" } -sp-std = { path = "../../substrate/primitives/std" } -sp-inherents = { path = "../../substrate/primitives/inherents" } -sp-api = { path = "../../substrate/primitives/api" } -sp-consensus-aura = { path = "../../substrate/primitives/consensus/aura" } -sc-sysinfo = { path = "../../substrate/client/sysinfo" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../substrate/utils/prometheus" } -sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } -substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } -substrate-state-trie-migration-rpc = { path = "../../substrate/utils/frame/rpc/state-trie-migration-rpc" } +frame-benchmarking = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +frame-try-runtime = { optional = true, workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true } +sp-block-builder = { workspace = true, default-features 
= true } +sp-keystore = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } # Polkadot # Use rococo-native as this is currently the default "local" relay chain -polkadot-cli = { path = "../../polkadot/cli", features = ["rococo-native"] } -polkadot-primitives = { path = "../../polkadot/primitives" } -polkadot-service = { path = "../../polkadot/node/service" } -xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } +polkadot-cli = { features = ["rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../client/cli" } -cumulus-client-collator = { path = "../client/collator" } -cumulus-client-consensus-aura = { path = "../client/consensus/aura" } -cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain" } -cumulus-client-consensus-common = { path = "../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" } -cumulus-client-parachain-inherent = { path = "../client/parachain-inherent" } -cumulus-client-service = { path = "../client/service" } -cumulus-primitives-aura = { path = "../primitives/aura" } -cumulus-primitives-core = { path = "../primitives/core" } -cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } -color-print = "0.3.4" +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-aura = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, 
default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [dev-dependencies] -assert_cmd = "2.0" -nix = { version = "0.28.0", features = ["signal"] } -tempfile = "3.8.0" +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +tempfile = { workspace = true } tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } -wait-timeout = "0.2" +wait-timeout = { workspace = true } [features] default = [] @@ -171,4 +171,8 @@ try-runtime = [ "shell-runtime/try-runtime", "sp-runtime/try-runtime", ] -fast-runtime = ["bridge-hub-rococo-runtime/fast-runtime"] +fast-runtime = [ + "bridge-hub-rococo-runtime/fast-runtime", + "coretime-rococo-runtime/fast-runtime", + "coretime-westend-runtime/fast-runtime", +] diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs index f7d2fd0f0be3c938ac5a9376d6896a0b073fa364..d06354dda22057fda87bb21e47340e38835abb64 100644 --- a/cumulus/polkadot-parachain/src/cli.rs +++ b/cumulus/polkadot-parachain/src/cli.rs @@ -14,6 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::common::NodeExtraArgs; +use clap::{Command, CommandFactory, FromArgMatches}; +use sc_cli::SubstrateCli; use std::path::PathBuf; /// Sub-commands supported by the collator. @@ -57,22 +60,13 @@ pub enum Subcommand { Benchmark(frame_benchmarking_cli::BenchmarkCmd), } -const AFTER_HELP_EXAMPLE: &str = color_print::cstr!( - r#"Examples: - polkadot-parachain --chain asset-hub-polkadot --sync warp -- --chain polkadot --sync warp - Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. - polkadot-parachain --chain asset-hub-polkadot --sync warp --relay-chain-rpc-url ws://rpc.example.com -- --chain polkadot - Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. - Uses ws://rpc.example.com as remote relay chain node. - "# -); #[derive(Debug, clap::Parser)] #[command( propagate_version = true, args_conflicts_with_subcommands = true, - subcommand_negates_reqs = true + subcommand_negates_reqs = true, + after_help = crate::examples(Self::executable_name()) )] -#[clap(after_help = AFTER_HELP_EXAMPLE)] pub struct Cli { #[command(subcommand)] pub subcommand: Option, @@ -80,6 +74,12 @@ pub struct Cli { #[command(flatten)] pub run: cumulus_client_cli::RunCmd, + /// EXPERIMENTAL: Use slot-based collator which can handle elastic scaling. + /// + /// Use with care, this flag is unstable and subject to change. + #[arg(long)] + pub experimental_use_slot_based: bool, + /// Disable automatic hardware benchmarks. /// /// By default these benchmarks are automatically ran at startup and measure @@ -92,7 +92,13 @@ pub struct Cli { /// Relay chain arguments #[arg(raw = true)] - pub relaychain_args: Vec, + pub relay_chain_args: Vec, +} + +impl Cli { + pub(crate) fn node_extra_args(&self) -> NodeExtraArgs { + NodeExtraArgs { use_slot_based_consensus: self.experimental_use_slot_based } + } } #[derive(Debug)] @@ -108,18 +114,32 @@ pub struct RelayChainCli { } impl RelayChainCli { - /// Parse the relay chain CLI parameters using the para chain `Configuration`. + fn polkadot_cmd() -> Command { + let help_template = color_print::cformat!( + "The arguments that are passed to the relay chain node. 
\n\ + \n\ + RELAY_CHAIN_ARGS: \n\ + {{options}}", + ); + + polkadot_cli::RunCmd::command() + .no_binary_name(true) + .help_template(help_template) + } + + /// Parse the relay chain CLI parameters using the parachain `Configuration`. pub fn new<'a>( para_config: &sc_service::Configuration, relay_chain_args: impl Iterator, ) -> Self { + let polkadot_cmd = Self::polkadot_cmd(); + let matches = polkadot_cmd.get_matches_from(relay_chain_args); + let base = FromArgMatches::from_arg_matches(&matches).unwrap_or_else(|e| e.exit()); + let extension = crate::chain_spec::Extensions::try_get(&*para_config.chain_spec); let chain_id = extension.map(|e| e.relay_chain.clone()); + let base_path = para_config.base_path.path().join("polkadot"); - Self { - base_path: Some(base_path), - chain_id, - base: clap::Parser::parse_from(relay_chain_args), - } + Self { base, chain_id, base_path: Some(base_path) } } } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 653ea3281f0f769df9a3c88629e139244b187863..fcf6c06f42227d5f8b9bb4d0c50686f699854511 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -14,15 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +#[cfg(feature = "runtime-benchmarks")] +use crate::service::Block; use crate::{ chain_spec, chain_spec::GenericChainSpec, cli::{Cli, RelayChainCli, Subcommand}, + common::NodeExtraArgs, fake_runtime_api::{ - asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi, + asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, + aura::RuntimeApi as AuraRuntimeApi, }, - service::{new_partial, Block, Hash}, + service::{new_aura_node_spec, DynNodeSpec, ShellNode}, }; +#[cfg(feature = "runtime-benchmarks")] use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; @@ -34,27 +39,39 @@ use sc_cli::{ }; use sc_service::config::{BasePath, PrometheusConfig}; use sp_runtime::traits::AccountIdConversion; +#[cfg(feature = "runtime-benchmarks")] +use sp_runtime::traits::HashingFor; use std::{net::SocketAddr, path::PathBuf}; +/// The choice of consensus for the parachain omni-node. +#[derive(PartialEq, Eq, Debug, Default)] +pub enum Consensus { + /// Aura consensus. + #[default] + Aura, + /// Use the relay chain consensus. + // TODO: atm this is just a demonstration, not really reach-able. We can add it to the CLI, + // env, or the chain spec. Or, just don't, and when we properly refactor this mess we will + // re-introduce it. + #[allow(unused)] + Relay, +} + /// Helper enum that is used for better distinction of different parachain/runtime configuration /// (it is based/calculated on ChainSpec's ID attribute) -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq)] enum Runtime { - /// This is the default runtime (actually based on rococo) - #[default] - Default, + /// None of the system-chain runtimes, rather the node will act agnostic to the runtime ie. be + /// an omni-node, and simply run a node with the given consensus algorithm. 
+ Omni(Consensus), Shell, Seedling, AssetHubPolkadot, - AssetHubKusama, - AssetHubRococo, - AssetHubWestend, + AssetHub, Penpal(ParaId), ContractsRococo, - CollectivesPolkadot, - CollectivesWestend, + Collectives, Glutton, - GluttonWestend, BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType), Coretime(chain_spec::coretime::CoretimeRuntimeType), People(chain_spec::people::PeopleRuntimeType), @@ -97,20 +114,20 @@ fn runtime(id: &str) -> Runtime { Runtime::Seedling } else if id.starts_with("asset-hub-polkadot") | id.starts_with("statemint") { Runtime::AssetHubPolkadot - } else if id.starts_with("asset-hub-kusama") | id.starts_with("statemine") { - Runtime::AssetHubKusama - } else if id.starts_with("asset-hub-rococo") { - Runtime::AssetHubRococo - } else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") { - Runtime::AssetHubWestend + } else if id.starts_with("asset-hub-kusama") | + id.starts_with("statemine") | + id.starts_with("asset-hub-rococo") | + id.starts_with("rockmine") | + id.starts_with("asset-hub-westend") | + id.starts_with("westmint") + { + Runtime::AssetHub } else if id.starts_with("penpal") { Runtime::Penpal(para_id.unwrap_or(ParaId::new(0))) } else if id.starts_with("contracts-rococo") { Runtime::ContractsRococo - } else if id.starts_with("collectives-polkadot") { - Runtime::CollectivesPolkadot - } else if id.starts_with("collectives-westend") { - Runtime::CollectivesWestend + } else if id.starts_with("collectives-polkadot") || id.starts_with("collectives-westend") { + Runtime::Collectives } else if id.starts_with(chain_spec::bridge_hubs::BridgeHubRuntimeType::ID_PREFIX) { Runtime::BridgeHub( id.parse::() @@ -120,15 +137,17 @@ fn runtime(id: &str) -> Runtime { Runtime::Coretime( id.parse::().expect("Invalid value"), ) - } else if id.starts_with("glutton-westend") { - Runtime::GluttonWestend } else if id.starts_with("glutton") { Runtime::Glutton } else if id.starts_with(chain_spec::people::PeopleRuntimeType::ID_PREFIX) { Runtime::People(id.parse::().expect("Invalid value")) } else { - log::warn!("No specific runtime was recognized for ChainSpec's id: '{}', so Runtime::default() will be used", id); - Runtime::default() + log::warn!( + "No specific runtime was recognized for ChainSpec's id: '{}', \ + so Runtime::Omni(Consensus::Aura) will be used", + id + ); + Runtime::Omni(Consensus::Aura) } } @@ -274,55 +293,34 @@ fn load_spec(id: &str) -> std::result::Result, String> { /// (H/T to Phala for the idea) /// E.g. 
"penpal-kusama-2004" yields ("penpal-kusama", Some(2004)) fn extract_parachain_id(id: &str) -> (&str, &str, Option) { - const ROCOCO_TEST_PARA_PREFIX: &str = "penpal-rococo-"; - const KUSAMA_TEST_PARA_PREFIX: &str = "penpal-kusama-"; - const POLKADOT_TEST_PARA_PREFIX: &str = "penpal-polkadot-"; - - const GLUTTON_PARA_DEV_PREFIX: &str = "glutton-kusama-dev-"; - const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; - const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; - - const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; - const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; - const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; - - let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(ROCOCO_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..ROCOCO_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(POLKADOT_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..POLKADOT_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_DEV_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_LOCAL_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) - } else { - (id, id, None) - }; + let para_prefixes = [ + // Penpal + "penpal-rococo-", + "penpal-kusama-", + "penpal-polkadot-", + // Glutton Kusama + "glutton-kusama-dev-", + "glutton-kusama-local-", + "glutton-kusama-genesis-", + // Glutton Westend + "glutton-westend-dev-", + "glutton-westend-local-", + "glutton-westend-genesis-", + ]; + + for para_prefix in para_prefixes { + if let Some(suffix) = id.strip_prefix(para_prefix) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + return (&id[..para_prefix.len() - 1], id, Some(para_id.into())) + } + } - (norm_id, orig_id, para.map(Into::into)) + (id, id, None) } impl SubstrateCli for Cli { fn impl_name() -> String { - "Polkadot parachain".into() + Self::executable_name() } fn impl_version() -> String { @@ -331,10 
+329,12 @@ impl SubstrateCli for Cli { fn description() -> String { format!( - "Polkadot parachain\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relaychain node.\n\n\ - {} [parachain-args] -- [relaychain-args]", + "The command-line arguments provided first will be passed to the parachain node, \n\ + and the arguments provided after -- will be passed to the relay chain node. \n\ + \n\ + Example: \n\ + \n\ + {} [parachain-args] -- [relay-chain-args]", Self::executable_name() ) } @@ -358,33 +358,27 @@ impl SubstrateCli for Cli { impl SubstrateCli for RelayChainCli { fn impl_name() -> String { - "Polkadot parachain".into() + Cli::impl_name() } fn impl_version() -> String { - env!("SUBSTRATE_CLI_IMPL_VERSION").into() + Cli::impl_version() } fn description() -> String { - format!( - "Polkadot parachain\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relay chain node.\n\n\ - {} [parachain-args] -- [relay_chain-args]", - Self::executable_name() - ) + Cli::description() } fn author() -> String { - env!("CARGO_PKG_AUTHORS").into() + Cli::author() } fn support_url() -> String { - "https://github.com/paritytech/polkadot-sdk/issues/new".into() + Cli::support_url() } fn copyright_start_year() -> i32 { - 2017 + Cli::copyright_start_year() } fn load_spec(&self, id: &str) -> std::result::Result, String> { @@ -392,108 +386,27 @@ impl SubstrateCli for RelayChainCli { } } -/// Creates partial components for the runtimes that are supported by the benchmarks. -macro_rules! construct_partials { - ($config:expr, |$partials:ident| $code:expr) => { - match $config.chain_spec.runtime()? { - Runtime::AssetHubPolkadot => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - $code - }, - Runtime::AssetHubKusama | - Runtime::AssetHubRococo | - Runtime::AssetHubWestend | - Runtime::BridgeHub(_) | - Runtime::CollectivesPolkadot | - Runtime::CollectivesWestend | - Runtime::Coretime(_) | - Runtime::People(_) => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { - let $partials = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - $code - }, - Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( - &$config, - crate::service::build_aura_import_queue, - )?; - $code - }, - } - }; -} - -macro_rules! construct_async_run { - (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ - let runner = $cli.create_runner($cmd)?; - match runner.config().chain_spec.runtime()? 
{ - Runtime::AssetHubPolkadot => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubKusama | - Runtime::AssetHubRococo | - Runtime::AssetHubWestend | - Runtime::BridgeHub(_) | - Runtime::CollectivesPolkadot | - Runtime::CollectivesWestend | - Runtime::Coretime(_) | - Runtime::People(_) => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Shell | - Runtime::Seedling | - Runtime::GluttonWestend | - Runtime::Glutton => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } - Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - _, - >( - &$config, - crate::service::build_aura_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - } - }} +fn new_node_spec( + config: &sc_service::Configuration, + extra_args: NodeExtraArgs, +) -> std::result::Result, sc_cli::Error> { + Ok(match config.chain_spec.runtime()? { + Runtime::AssetHubPolkadot => + new_aura_node_spec::(extra_args), + Runtime::AssetHub | + Runtime::BridgeHub(_) | + Runtime::Collectives | + Runtime::Coretime(_) | + Runtime::People(_) | + Runtime::ContractsRococo | + Runtime::Glutton | + Runtime::Penpal(_) => new_aura_node_spec::(extra_args), + Runtime::Shell | Runtime::Seedling => Box::new(ShellNode), + Runtime::Omni(consensus) => match consensus { + Consensus::Aura => new_aura_node_spec::(extra_args), + Consensus::Relay => Box::new(ShellNode), + }, + }) } /// Parse command line arguments into service configuration. 
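// Illustrative sketch (not part of the patch): the `new_node_spec` helper above replaces the
// old `construct_partials!`/`construct_async_run!` macros with a single runtime-to-node-spec
// dispatch. The `Runtime`/`NodeSpec` items below are simplified placeholder stand-ins for the
// real crate types, shown only to make the shape of that pattern concrete.
trait NodeSpec {
    fn name(&self) -> &'static str;
}

struct AuraNode;
struct ShellNode;

impl NodeSpec for AuraNode {
    fn name(&self) -> &'static str {
        "aura"
    }
}

impl NodeSpec for ShellNode {
    fn name(&self) -> &'static str {
        "shell"
    }
}

enum Runtime {
    AssetHub,
    Shell,
}

// One dispatch point instead of repeating the runtime match in every subcommand handler.
fn new_node_spec(runtime: &Runtime) -> Box<dyn NodeSpec> {
    match runtime {
        Runtime::AssetHub => Box::new(AuraNode),
        Runtime::Shell => Box::new(ShellNode),
    }
}

fn main() {
    let node = new_node_spec(&Runtime::Shell);
    println!("selected node spec: {}", node.name());
}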
@@ -506,37 +419,45 @@ pub fn run() -> Result<()> { runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, Some(Subcommand::CheckBlock(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_check_block_cmd(config, cmd) }) }, Some(Subcommand::ExportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.database)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_blocks_cmd(config, cmd) }) }, Some(Subcommand::ExportState(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.chain_spec)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_state_cmd(config, cmd) }) }, Some(Subcommand::ImportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_import_blocks_cmd(config, cmd) + }) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_revert_cmd(config, cmd) }) }, - Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.backend, None)) - }), Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter()); runner.sync_run(|config| { - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let polkadot_config = SubstrateCli::create_configuration( &polkadot_cli, &polkadot_cli, @@ -549,8 +470,10 @@ pub fn run() -> Result<()> { }, Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; - runner - .sync_run(|config| construct_partials!(config, |partials| cmd.run(partials.client))) + runner.sync_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_export_genesis_head_cmd(config, cmd) + }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; @@ -564,45 +487,34 @@ pub fn run() -> Result<()> { // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => - if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run_with_spec::, ReclaimHostFunctions>(Some(config.chain_spec))) - } else { - Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." 
- .into()) - }, + #[cfg(feature = "runtime-benchmarks")] + BenchmarkCmd::Pallet(cmd) => runner.sync_run(|config| { + cmd.run_with_spec::, ReclaimHostFunctions>(Some( + config.chain_spec, + )) + }), BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| cmd.run(partials.client)) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_block_cmd(config, cmd) }), - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => - return Err(sc_cli::Error::Input( - "Compile with --features=runtime-benchmarks \ - to enable storage benchmarks." - .into(), - ) - .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| { - let db = partials.backend.expose_db(); - let storage = partials.backend.expose_storage(); - - cmd.run(config, partials.client.clone(), db, storage) - }) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_storage_cmd(config, cmd) }), BenchmarkCmd::Machine(cmd) => runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), - // NOTE: this allows the Client to leniently implement - // new benchmark commands without requiring a companion MR. #[allow(unreachable_patterns)] - _ => Err("Benchmarking sub-command unsupported".into()), + _ => Err("Benchmarking sub-command unsupported or compilation feature missing. \ + Make sure to compile with --features=runtime-benchmarks \ + to enable all supported benchmarks." + .into()), } }, Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), None => { let runner = cli.create_runner(&cli.run.normalize())?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter()); let collator_options = cli.run.collator_options(); runner.run_node_until_exit(|config| async move { @@ -624,230 +536,82 @@ pub fn run() -> Result<()> { if old_path.exists() && new_path.exists() { return Err(format!( - "Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.", - old_name, old_path.display(), new_path.display() - ).into()) + "Found legacy {} path {} and new Asset Hub path {}. \ + Delete one path such that only one exists.", + old_name, + old_path.display(), + new_path.display() + ) + .into()) } if old_path.exists() { std::fs::rename(old_path.clone(), new_path.clone())?; info!( - "Statemint renamed to Asset Hub. The filepath with associated data on disk has been renamed from {} to {}.", - old_path.display(), new_path.display() + "{} was renamed to Asset Hub. 
The filepath with associated data on disk \ + has been renamed from {} to {}.", + old_name, + old_path.display(), + new_path.display() ); } } - let hwbench = (!cli.no_hardware_benchmarks).then_some( - config.database.path().map(|database_path| { + let hwbench = (!cli.no_hardware_benchmarks) + .then_some(config.database.path().map(|database_path| { let _ = std::fs::create_dir_all(database_path); sc_sysinfo::gather_hwbench(Some(database_path)) - })).flatten(); + })) + .flatten(); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) .map(|e| e.para_id) .ok_or("Could not find parachain extension in chain-spec.")?; - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let id = ParaId::from(para_id); let parachain_account = - AccountIdConversion::::into_account_truncating(&id); + AccountIdConversion::::into_account_truncating( + &id, + ); let tokio_handle = config.tokio_handle.clone(); let polkadot_config = SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) .map_err(|err| format!("Relay chain argument error: {}", err))?; - info!("Parachain id: {:?}", id); - info!("Parachain Account: {}", parachain_account); - info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - - match polkadot_config.network.network_backend { - sc_network::config::NetworkBackendType::Libp2p => - start_node::>( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await, - sc_network::config::NetworkBackendType::Litep2p => - start_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await, - } + info!("🪪 Parachain id: {:?}", id); + info!("🧾 Parachain Account: {}", parachain_account); + info!("✍️ Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); + + start_node( + config, + polkadot_config, + collator_options, + id, + cli.node_extra_args(), + hwbench, + ) + .await }) }, } } -async fn start_node>( +#[sc_tracing::logging::prefix_logs_with("Parachain")] +async fn start_node( config: sc_service::Configuration, polkadot_config: sc_service::Configuration, collator_options: cumulus_client_cli::CollatorOptions, id: ParaId, + extra_args: NodeExtraArgs, hwbench: Option, ) -> Result { - match config.chain_spec.runtime()?
{ - Runtime::AssetHubPolkadot => crate::service::start_asset_hub_lookahead_node::< - AssetHubPolkadotRuntimeApi, - AssetHubPolkadotAuraId, - Network, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::AssetHubRococo | Runtime::AssetHubWestend | Runtime::AssetHubKusama => - crate::service::start_asset_hub_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::CollectivesWestend | Runtime::CollectivesPolkadot => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Seedling | Runtime::Shell => crate::service::start_shell_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::ContractsRococo => crate::service::start_contracts_rococo_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) + let node_spec = new_node_spec(&config, extra_args)?; + node_spec + .start_node(config, polkadot_config, collator_options, id, hwbench) .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type { - chain_spec::coretime::CoretimeRuntimeType::Kusama | - chain_spec::coretime::CoretimeRuntimeType::KusamaLocal | - chain_spec::coretime::CoretimeRuntimeType::Polkadot | - chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal | - chain_spec::coretime::CoretimeRuntimeType::Rococo | - chain_spec::coretime::CoretimeRuntimeType::RococoLocal | - chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment | - chain_spec::coretime::CoretimeRuntimeType::Westend | - chain_spec::coretime::CoretimeRuntimeType::WestendLocal | - chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Penpal(_) | Runtime::Default => - crate::service::start_rococo_parachain_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Glutton | Runtime::GluttonWestend => - crate::service::start_basic_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::People(people_runtime_type) => match people_runtime_type { - 
chain_spec::people::PeopleRuntimeType::Kusama | - chain_spec::people::PeopleRuntimeType::KusamaLocal | - chain_spec::people::PeopleRuntimeType::Polkadot | - chain_spec::people::PeopleRuntimeType::PolkadotLocal | - chain_spec::people::PeopleRuntimeType::Rococo | - chain_spec::people::PeopleRuntimeType::RococoLocal | - chain_spec::people::PeopleRuntimeType::RococoDevelopment | - chain_spec::people::PeopleRuntimeType::Westend | - chain_spec::people::PeopleRuntimeType::WestendLocal | - chain_spec::people::PeopleRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - } + .map_err(Into::into) } impl DefaultConfigurationValues for RelayChainCli { @@ -979,7 +743,7 @@ impl CliConfiguration for RelayChainCli { mod tests { use crate::{ chain_spec::{get_account_id_from_seed, get_from_seed}, - command::{Runtime, RuntimeResolver}, + command::{Consensus, Runtime, RuntimeResolver}, }; use sc_chain_spec::{ChainSpec, ChainSpecExtension, ChainSpecGroup, ChainType, Extension}; use serde::{Deserialize, Serialize}; @@ -1006,9 +770,9 @@ mod tests { pub attribute_z: u32, } - fn store_configuration(dir: &TempDir, spec: Box) -> PathBuf { + fn store_configuration(dir: &TempDir, spec: &dyn ChainSpec) -> PathBuf { let raw_output = true; - let json = sc_service::chain_ops::build_spec(&*spec, raw_output) + let json = sc_service::chain_ops::build_spec(spec, raw_output) .expect("Failed to build json string"); let mut cfg_file_path = dir.path().to_path_buf(); cfg_file_path.push(spec.id()); @@ -1049,32 +813,44 @@ mod tests { let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("shell-1", Extensions1::default())), + &create_default_with_extensions("shell-1", Extensions1::default()), ); assert_eq!(Runtime::Shell, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("shell-2", Extensions2::default())), + &create_default_with_extensions("shell-2", Extensions2::default()), ); assert_eq!(Runtime::Shell, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("seedling", Extensions2::default())), + &create_default_with_extensions("seedling", Extensions2::default()), ); assert_eq!(Runtime::Seedling, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(crate::chain_spec::rococo_parachain::rococo_parachain_local_config()), + &create_default_with_extensions("penpal-rococo-1000", Extensions2::default()), ); - assert_eq!(Runtime::Default, path.runtime().unwrap()); + assert_eq!(Runtime::Penpal(1000.into()), path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(crate::chain_spec::contracts::contracts_rococo_local_config()), + &create_default_with_extensions("penpal-polkadot-2000", Extensions2::default()), + ); + assert_eq!(Runtime::Penpal(2000.into()), path.runtime().unwrap()); + + let path = store_configuration( + &temp_dir, + &crate::chain_spec::contracts::contracts_rococo_local_config(), ); assert_eq!(Runtime::ContractsRococo, path.runtime().unwrap()); + + let path = store_configuration( + &temp_dir, + &crate::chain_spec::rococo_parachain::rococo_parachain_local_config(), + ); + assert_eq!(Runtime::Omni(Consensus::Aura), path.runtime().unwrap()); } } diff --git a/cumulus/polkadot-parachain/src/common/aura.rs b/cumulus/polkadot-parachain/src/common/aura.rs new file mode 100644 index 
0000000000000000000000000000000000000000..9f72d847926f3a7be480911bdb273e33bf6c0afb --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/aura.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Aura-related primitives for cumulus parachain collators. + +use codec::Codec; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::BlockT; +use sp_consensus_aura::AuraApi; +use sp_runtime::app_crypto::{AppCrypto, AppPair, AppSignature, Pair}; + +/// Convenience trait for defining the basic bounds of an `AuraId`. +pub trait AuraIdT: AppCrypto + Codec + Send { + /// Extra bounds for the `Pair`. + type BoundedPair: AppPair + AppCrypto; + + /// Extra bounds for the `Signature`. + type BoundedSignature: AppSignature + + TryFrom> + + std::hash::Hash + + sp_runtime::traits::Member + + Codec; +} + +impl AuraIdT for T +where + T: AppCrypto + Codec + Send + Sync, + <::Pair as AppCrypto>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + type BoundedPair = ::Pair; + type BoundedSignature = <::Pair as AppCrypto>::Signature; +} + +/// Convenience trait for defining the basic bounds of a parachain runtime that supports +/// the Aura consensus. +pub trait AuraRuntimeApi: + sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi + + Sized +{ + /// Check if the runtime has the Aura API. + fn has_aura_api(&self, at: Block::Hash) -> bool { + self.has_api::::Public>>(at) + .unwrap_or(false) + } +} + +impl AuraRuntimeApi for T where + T: sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi +{ +} diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f5febafe30427fbd549b79e81c83d2392a795d5 --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/mod.rs @@ -0,0 +1,72 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Cumulus parachain collator primitives. 
+ +#![warn(missing_docs)] + +pub mod aura; + +use cumulus_primitives_core::CollectCollationInfo; +use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, Metadata}; +use sp_block_builder::BlockBuilder; +use sp_runtime::traits::Block as BlockT; +use sp_session::SessionKeys; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; + +/// Convenience trait that defines the basic bounds for the `RuntimeApi` of a parachain node. +pub trait NodeRuntimeApi: + ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo + + Sized +{ +} + +impl NodeRuntimeApi for T where + T: ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo +{ +} + +/// Convenience trait that defines the basic bounds for the `ConstructRuntimeApi` of a parachain +/// node. +pub trait ConstructNodeRuntimeApi>: + ConstructRuntimeApi + Send + Sync + 'static +{ + /// Basic bounds for the `RuntimeApi` of a parachain node. + type BoundedRuntimeApi: NodeRuntimeApi; +} + +impl> ConstructNodeRuntimeApi for T +where + T: ConstructRuntimeApi + Send + Sync + 'static, + T::RuntimeApi: NodeRuntimeApi, +{ + type BoundedRuntimeApi = T::RuntimeApi; +} + +/// Extra args that are passed when creating a new node spec. +pub struct NodeExtraArgs { + pub use_slot_based_consensus: bool, +} diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs index 82c02943c5fc9ee8337cca377ddf2271b454cdda..0b79d338c16813238e70a29d454e91882113128f 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs index 6b718e9121642e37640ad09ee9364974a3d0aa38..823eb9ab584a06ea1370d6b4c03ce124c426f9fe 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index 0757bea84aae83b64ec24982874c28f095057e75..cbb76fa214cbedf761975888e63f7ef67910d5fa 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -14,14 +14,38 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Cumulus test parachain collator +//! Polkadot parachain node. #![warn(missing_docs)] #![warn(unused_extern_crates)] +pub(crate) fn examples(executable_name: String) -> String { + color_print::cformat!( + r#"Examples: + + {0} --chain para.json --sync warp -- --chain relay.json --sync warp + Launch a warp-syncing full node of a given para's chain-spec, and a given relay's chain-spec. 
+ + The above approach is the most flexible, and the most forward-compatible way to spawn an omni-node. + + You can find the chain-spec of some networks in: + https://paritytech.github.io/chainspecs + + {0} --chain asset-hub-polkadot --sync warp -- --chain polkadot --sync warp + Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. + + {0} --chain asset-hub-kusama --sync warp --relay-chain-rpc-url ws://rpc.example.com -- --chain kusama + Launch a warp-syncing full node of the Asset Hub parachain on the Kusama Relay Chain. + Uses ws://rpc.example.com as remote relay chain node. + "#, + executable_name, + ) +} + mod chain_spec; mod cli; mod command; +mod common; mod fake_runtime_api; mod rpc; mod service; diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs index 7437bb1f4b9372f2454cbe6d491d302056606571..283a73d931d769fbd7b521c6f8a4a7558fc48be0 100644 --- a/cumulus/polkadot-parachain/src/rpc.rs +++ b/cumulus/polkadot-parachain/src/rpc.rs @@ -18,91 +18,82 @@ #![warn(missing_docs)] -use std::sync::Arc; - +use crate::{ + common::ConstructNodeRuntimeApi, + service::{ParachainBackend, ParachainClient}, +}; +use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use parachains_common::{AccountId, Balance, Block, Nonce}; -use sc_client_api::AuxStore; -pub use sc_rpc::DenyUnsafe; -use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sc_rpc::{ + dev::{Dev, DevApiServer}, + DenyUnsafe, +}; +use std::{marker::PhantomData, sync::Arc}; +use substrate_frame_rpc_system::{System, SystemApiServer}; +use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; /// A type representing all RPC extensions. pub type RpcExtension = jsonrpsee::RpcModule<()>; -/// Full client dependencies -pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc
, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, +pub(crate) trait BuildRpcExtensions { + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc, + backend: Arc, + pool: Arc, + ) -> sc_service::error::Result; } -/// Instantiate all RPC extensions. -pub fn create_full( - deps: FullDeps, - backend: Arc, -) -> Result> +pub(crate) struct BuildEmptyRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildEmptyRpcExtensions where - C: ProvideRuntimeApi - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, - B: sc_client_api::Backend + Send + Sync + 'static, - B::State: sc_client_api::backend::StateBackend>, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + _deny_unsafe: DenyUnsafe, + _client: Arc>, + _backend: Arc, + _pool: Arc>>, + ) -> sc_service::error::Result { + Ok(RpcExtension::new(())) + } } -/// Instantiate all RPCs we want at the contracts-rococo chain. 
-pub fn create_contracts_rococo( - deps: FullDeps, -) -> Result> +pub(crate) struct BuildParachainRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildParachainRpcExtensions where - C: ProvideRuntimeApi - + sc_client_api::BlockBackend - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use sc_rpc::dev::{Dev, DevApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(Dev::new(client, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc>, + backend: Arc, + pool: Arc>>, + ) -> sc_service::error::Result { + let build = || -> Result> { + let mut module = RpcExtension::new(()); + + module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client.clone()).into_rpc())?; + module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; + module.merge(Dev::new(client, deny_unsafe).into_rpc())?; + + Ok(module) + }; + build().map_err(Into::into) + } } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 19ad75e384cecd307608cd02cdbf67a42c5a93b0..f5f6189d1f0d6b399f584df77bf70bd3961781df 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -14,56 +14,57 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use codec::{Codec, Decode}; -use cumulus_client_cli::CollatorOptions; +use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand}; use cumulus_client_collator::service::CollatorService; -use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; -use cumulus_client_consensus_common::{ - ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, +use cumulus_client_consensus_aura::collators::{ + lookahead::{self as aura, Params as AuraParams}, + slot_based::{self as slot_based, Params as SlotBasedParams}, }; +use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; +use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::old_consensus; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, }; -use cumulus_primitives_core::{ - relay_chain::{Hash as PHash, PersistedValidationData, ValidationCode}, - ParaId, -}; +use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; -use sc_rpc::DenyUnsafe; -use sp_core::Pair; -use jsonrpsee::RpcModule; - -use crate::{fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, rpc}; -pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Nonce}; +use crate::{ + common::{ + aura::{AuraIdT, AuraRuntimeApi}, + ConstructNodeRuntimeApi, NodeExtraArgs, + }, + fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, + rpc::BuildRpcExtensions, +}; +pub use parachains_common::{AccountId, Balance, Block, Hash, Nonce}; -use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; -use futures::{lock::Mutex, prelude::*}; +use crate::rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions}; +use frame_benchmarking_cli::BlockCmd; +#[cfg(any(feature = "runtime-benchmarks"))] +use frame_benchmarking_cli::StorageCmd; +use futures::prelude::*; +use polkadot_primitives::CollatorPair; use prometheus_endpoint::Registry; +use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd}; +use sc_client_api::BlockchainEvents; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImportParams, ImportQueue, + BlockImportParams, DefaultImportQueue, ImportQueue, }; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock}; -use sc_network_sync::SyncingService; -use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_service::{Configuration, Error, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_sysinfo::HwBench; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi}; -use sp_consensus_aura::AuraApi; -use sp_core::traits::SpawnEssentialNamed; +use sc_transaction_pool::FullPool; +use sp_api::ProvideRuntimeApi; use sp_keystore::KeystorePtr; -use sp_runtime::{ - app_crypto::AppCrypto, - traits::{Block as BlockT, Header as HeaderT}, -}; -use std::{marker::PhantomData, sync::Arc, time::Duration}; - -use polkadot_primitives::CollatorPair; +use sp_runtime::{app_crypto::AppCrypto, 
traits::Header as HeaderT}; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration}; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = cumulus_client_service::ParachainHostFunctions; @@ -74,9 +75,9 @@ type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -type ParachainClient = TFullClient>; +pub type ParachainClient = TFullClient>; -type ParachainBackend = TFullBackend; +pub type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport>, ParachainBackend>; @@ -91,502 +92,317 @@ pub type Service = PartialComponents< (ParachainBlockImport, Option, Option), >; -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - config: &Configuration, - build_import_queue: BIQ, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, -{ - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let heap_pages = config - .default_heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - - let executor = sc_executor::WasmExecutor::::builder() - .with_execution_method(config.wasm_method) - .with_max_runtime_instances(config.max_runtime_instances) - .with_runtime_cache_size(config.runtime_cache_size) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - true, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - let import_queue = build_import_queue( - client.clone(), - block_import.clone(), - config, - telemetry.as_ref().map(|telemetry| telemetry.handle()), - &task_manager, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), - }) +pub(crate) trait BuildImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result>; } -/// Start a 
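// Illustrative sketch (not part of the patch): the service refactor swaps the closure
// arguments of the old `new_partial`/`start_node_impl` for zero-sized "strategy" types
// (`BuildImportQueue`, `BuildRpcExtensions`, `StartConsensus`) that a `NodeSpec` names via
// associated types, so the shared setup logic lives once in default trait methods. The items
// below are simplified placeholders showing that pattern, not the real crate types or
// signatures.
use std::marker::PhantomData;

struct Client;
struct ImportQueue;

// Strategy trait: how to build an import queue for a given runtime API.
trait BuildImportQueue<RuntimeApi> {
    fn build_import_queue(client: &Client) -> ImportQueue;
}

// Zero-sized implementor, generic over the runtime API it serves.
struct BuildShellImportQueue<RuntimeApi>(PhantomData<RuntimeApi>);

impl<RuntimeApi> BuildImportQueue<RuntimeApi> for BuildShellImportQueue<RuntimeApi> {
    fn build_import_queue(_client: &Client) -> ImportQueue {
        ImportQueue
    }
}

// A node spec selects its strategies once; shared logic lives in default methods.
trait NodeSpec {
    type RuntimeApi;
    type BuildImportQueue: BuildImportQueue<Self::RuntimeApi>;

    fn new_import_queue(client: &Client) -> ImportQueue {
        <Self::BuildImportQueue as BuildImportQueue<Self::RuntimeApi>>::build_import_queue(client)
    }
}

struct ShellNode;

impl NodeSpec for ShellNode {
    type RuntimeApi = ();
    type BuildImportQueue = BuildShellImportQueue<()>;
}

fn main() {
    // Calls the default trait method, which delegates to the chosen strategy type.
    let _queue: ImportQueue = ShellNode::new_import_queue(&Client);
}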
node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, - para_id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +pub(crate) trait StartConsensus where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, - RB: Fn( - DenyUnsafe, - Arc>, - Arc, - Arc>>, - ) -> Result, sc_service::Error> - + 'static, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, - Net: NetworkBackend, + RuntimeApi: ConstructNodeRuntimeApi>, { - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let backend_for_rpc = backend.clone(); - - Box::new(move |deny_unsafe, _| { - rpc_ext_builder( - deny_unsafe, - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - ) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: 
params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), sc_service::Error>; +} - let relay_chain_slot_duration = Duration::from_secs(6); +pub(crate) trait NodeSpec { + type RuntimeApi: ConstructNodeRuntimeApi>; + + type BuildImportQueue: BuildImportQueue + 'static; + + type BuildRpcExtensions: BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > + 'static; + + type StartConsensus: StartConsensus + 'static; + + const SYBIL_RESISTANCE: CollatorSybilResistance; + + /// Starts a `ServiceBuilder` for a full service. + /// + /// Use this macro if you don't actually need the full service, but just the builder in order to + /// be able to perform chain operations. 
+ fn new_partial(config: &Configuration) -> sc_service::error::Result> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| { + HeapAllocStrategy::Static { extra_pages: h as _ } + }); + + let executor = sc_executor::WasmExecutor::::builder() + .with_execution_method(config.wasm_method) + .with_max_runtime_instances(config.max_runtime_instances) + .with_runtime_cache_size(config.runtime_cache_size) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts_record_import::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + true, + )?; + let client = Arc::new(client); + + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( + let import_queue = Self::BuildImportQueue::build_import_queue( client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), + block_import.clone(), + config, + telemetry.as_ref().map(|telemetry| telemetry.handle()), &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} -/// Build the import queue for Aura-based runtimes. 
-pub fn build_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} - -/// Start a rococo parachain node. -pub async fn start_rococo_parachain_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_aura_import_queue, - start_lookahead_aura_consensus, - hwbench, - ) - .await -} - -/// Build the import queue for the shell runtime. -pub fn build_shell_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - _: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) -} - -fn build_parachain_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, -{ - let deps = rpc::FullDeps { client, pool, deny_unsafe }; + Ok(PartialComponents { + backend, + client, + import_queue, + keystore_container, + task_manager, + transaction_pool, + select_chain: (), + other: (block_import, telemetry, telemetry_worker_handle), + }) + } - rpc::create_full(deps, backend).map_err(Into::into) -} + /// Start a node with the given parachain spec. + /// + /// This is the actual implementation that is abstract over the executor and the runtime api. 
+ fn start_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> + where + Net: NetworkBackend, + { + Box::pin(async move { + let parachain_config = prepare_node_config(parachain_config); + + let params = Self::new_partial(¶chain_config)?; + let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + + let client = params.client.clone(); + let backend = params.backend.clone(); + + let mut task_manager = params.task_manager; + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle, + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let import_queue_service = params.import_queue.service(); + let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + build_network(BuildNetworkParams { + parachain_config: ¶chain_config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + para_id, + spawn_handle: task_manager.spawn_handle(), + relay_chain_interface: relay_chain_interface.clone(), + import_queue: params.import_queue, + sybil_resistance_level: Self::SYBIL_RESISTANCE, + }) + .await?; + + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |deny_unsafe, _| { + Self::BuildRpcExtensions::build_rpc_extensions( + deny_unsafe, + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: params.keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + sync_service: sync_service.clone(), + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + if validator { + warn_if_slow_hardware(&hwbench); + } -fn build_contracts_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - _backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> { - let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } - crate::rpc::create_contracts_rococo(deps).map_err(Into::into) -} + let announce_block = { + let sync_service = sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let relay_chain_slot_duration = Duration::from_secs(6); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: 
announce_block.clone(), + para_id, + relay_chain_interface: relay_chain_interface.clone(), + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service, + })?; + + if validator { + Self::StartConsensus::start_consensus( + client.clone(), + block_import, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + relay_chain_interface.clone(), + transaction_pool, + params.keystore_container.keystore(), + relay_chain_slot_duration, + para_id, + collator_key.expect("Command line arguments do not allow this. qed"), + overseer_handle, + announce_block, + backend.clone(), + )?; + } -/// Start a polkadot-shell parachain node. -pub async fn start_shell_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Unresistant, // free-for-all consensus - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_shell_import_queue, - start_relay_chain_consensus, - hwbench, - ) - .await -} + start_network.start_network(); -enum BuildOnAccess { - Uninitialized(Option R + Send + Sync>>), - Initialized(R), + Ok(task_manager) + }) + } } -impl BuildOnAccess { - fn get_mut(&mut self) -> &mut R { - loop { - match self { - Self::Uninitialized(f) => { - *self = Self::Initialized((f.take().unwrap())()); - }, - Self::Initialized(ref mut r) => return r, - } - } +/// Build the import queue for the shell runtime. +pub(crate) struct BuildShellImportQueue(PhantomData); + +impl BuildImportQueue for BuildShellImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + _telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + cumulus_client_consensus_relay_chain::import_queue( + client, + block_import, + |_, _| async { Ok(()) }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ) + .map_err(Into::into) } } -/// Special [`ParachainConsensus`] implementation that waits for the upgrade from -/// shell to a parachain runtime that implements Aura. 
-struct WaitForAuraConsensus { - client: Arc, - aura_consensus: Arc>>>>, - relay_chain_consensus: Arc>>>, - _phantom: PhantomData, -} +pub(crate) struct ShellNode; -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - _phantom: PhantomData, - } - } -} +impl NodeSpec for ShellNode { + type RuntimeApi = FakeRuntimeApi; + type BuildImportQueue = BuildShellImportQueue; + type BuildRpcExtensions = BuildEmptyRpcExtensions; + type StartConsensus = StartRelayChainConsensus; -#[async_trait::async_trait] -impl ParachainConsensus for WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Codec + Sync, -{ - async fn produce_candidate( - &mut self, - parent: &Header, - relay_parent: PHash, - validation_data: &PersistedValidationData, - ) -> Option> { - if self - .client - .runtime_api() - .has_api::>(parent.hash()) - .unwrap_or(false) - { - self.aura_consensus - .lock() - .await - .get_mut() - .produce_candidate(parent, relay_parent, validation_data) - .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant; } struct Verifier { client: Arc, - aura_verifier: BuildOnAccess>>, + aura_verifier: Box>, relay_chain_verifier: Box>, _phantom: PhantomData, } @@ -594,21 +410,16 @@ struct Verifier { #[async_trait::async_trait] impl VerifierT for Verifier where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Sync + Codec, + Client: ProvideRuntimeApi + Send + Sync, + Client::Api: AuraRuntimeApi, + AuraId: AuraIdT + Sync, { async fn verify( - &mut self, + &self, block_import: BlockImportParams, ) -> Result, String> { - if self - .client - .runtime_api() - .has_api::>(*block_import.header.parent_hash()) - .unwrap_or(false) - { - self.aura_verifier.get_mut().verify(block_import).await + if self.client.runtime_api().has_aura_api(*block_import.header.parent_hash()) { + self.aura_verifier.verify(block_import).await } else { self.relay_chain_verifier.verify(block_import).await } @@ -617,422 +428,374 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
-pub fn build_relay_to_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl BuildImportQueue + for BuildRelayToAuraImportQueue where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, { - let verifier_client = client.clone(); - - let aura_verifier = move || { - Box::new(cumulus_client_consensus_aura::build_verifier::< - ::Pair, - _, - _, - _, - >(cumulus_client_consensus_aura::BuildVerifierParams { - client: verifier_client.clone(), - create_inherent_data_providers: move |parent_hash, _| { - let cidp_client = verifier_client.clone(); - async move { - let slot_duration = cumulus_client_consensus_aura::slot_duration_at( - &*cidp_client, - parent_hash, - )?; - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - } - }, - telemetry: telemetry_handle, - })) as Box<_> - }; + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + let verifier_client = client.clone(); + + let aura_verifier = + cumulus_client_consensus_aura::build_verifier::<::Pair, _, _, _>( + cumulus_client_consensus_aura::BuildVerifierParams { + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + } + }, + telemetry: telemetry_handle, + }, + ); - let relay_chain_verifier = - Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; + let relay_chain_verifier = + Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })); - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), - _phantom: PhantomData, - }; + let verifier = Verifier { + client, + relay_chain_verifier, + aura_verifier: Box::new(aura_verifier), + _phantom: PhantomData, + }; - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); + let registry = config.prometheus_registry(); + let spawner = task_manager.spawn_essential_handle(); - Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + } } /// Uses the lookahead 
collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. -pub async fn start_generic_aura_lookahead_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - start_lookahead_aura_consensus, - hwbench, - ) - .await +pub(crate) struct AuraNode( + pub PhantomData<(RuntimeApi, AuraId, StartConsensus)>, +); + +impl Default for AuraNode { + fn default() -> Self { + Self(Default::default()) + } } -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -/// -/// Uses the lookahead collator to support async backing. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_lookahead_node< - RuntimeApi, - AuraId: AppCrypto + Send + Codec + Sync, - Net, ->( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +impl NodeSpec for AuraNode where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, - Net: NetworkBackend, + + substrate_frame_rpc_system::AccountNonceApi, + AuraId: AuraIdT + Sync, + StartConsensus: self::StartConsensus + 'static, { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. 
- let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; - - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } - } - - // Move to Aura consensus. - let proposer = Proposer::new(proposer_factory); + type RuntimeApi = RuntimeApi; + type BuildImportQueue = BuildRelayToAuraImportQueue; + type BuildRpcExtensions = BuildParachainRpcExtensions; + type StartConsensus = StartConsensus; + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant; +} - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface2, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: true, /* we need to always re-initialize for asset-hub moving - * to aura */ - }; - - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) - .await - }); - - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); - - Ok(()) - }, - hwbench, - ) - .await +pub fn new_aura_node_spec(extra_args: NodeExtraArgs) -> Box +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, + AuraId: AuraIdT + Sync, +{ + if extra_args.use_slot_based_consensus { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartSlotBasedAuraConsensus, + >::default()) + } else { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartLookaheadAuraConsensus, + >::default()) + } } /// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain /// decides what is backed and included. 
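// Editor's sketch, not part of the patch: how a caller (for example the
// polkadot-parachain CLI) might combine `new_aura_node_spec` above with the
// object-safe `DynNodeSpec::start_node` defined further down in this file.
// `SomeRuntimeApi` is a placeholder whose trait bounds are elided for brevity;
// everything else follows the signatures visible in this diff.
async fn start_aura_node_sketch<SomeRuntimeApi>(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
    extra_args: NodeExtraArgs,
    hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<TaskManager> {
    // `new_aura_node_spec` picks the slot-based or lookahead Aura consensus
    // starter based on the flag carried in `NodeExtraArgs`.
    let node_spec = new_aura_node_spec::<SomeRuntimeApi, sp_consensus_aura::sr25519::AuthorityId>(
        extra_args,
    );
    // `start_node` then selects the libp2p or litep2p network backend from the
    // parachain configuration and drives the full node to completion.
    node_spec
        .start_node(parachain_config, polkadot_config, collator_options, para_id, hwbench)
        .await
}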
-fn start_relay_chain_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - _sync_oracle: Arc>, - _keystore: KeystorePtr, - _relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - _backend: Arc, -) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = +pub(crate) struct StartRelayChainConsensus; + +impl StartConsensus for StartRelayChainConsensus { + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + _keystore: KeystorePtr, + _relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + _backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); + + let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( + cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { + para_id, + proposer_factory, + block_import, + relay_chain_interface: relay_chain_interface.clone(), + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let relay_chain_interface = relay_chain_interface.clone(); + async move { + let parachain_inherent = cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( relay_parent, &relay_chain_interface, &validation_data, para_id, ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok(parachain_inherent) + } + }, }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) + ); + + let spawner = task_manager.spawn_handle(); + + // Required for free-for-all consensus + #[allow(deprecated)] + old_consensus::start_collator_sync(old_consensus::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block, + overseer_handle, + spawner, + key: collator_key, + parachain_consensus: 
free_for_all, + runtime_api: client.clone(), + }); + + Ok(()) + } } /// Start consensus using the lookahead aura collator. -fn start_lookahead_aura_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - sync_oracle: Arc>, - keystore: KeystorePtr, - relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - backend: Arc, -) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer: Proposer::new(proposer_factory), - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; +pub(crate) struct StartSlotBasedAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); - let fut = aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); +impl StartConsensus + for StartSlotBasedAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + _overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); - Ok(()) + let proposer = Proposer::new(proposer_factory); + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let client_for_aura = client.clone(); + let params = SlotBasedParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + slot_drift: Duration::from_secs(1), + }; + + let (collation_future, block_builder_future) = + slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); + + 
task_manager.spawn_essential_handle().spawn( + "collation-task", + Some("parachain-block-authoring"), + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + Some("parachain-block-authoring"), + block_builder_future, + ); + Ok(()) + } } -/// Start an aura powered parachain node which uses the lookahead collator to support async backing. -/// This node is basic in the sense that its runtime api doesn't include common contents such as -/// transaction payment. Used for aura glutton. -pub async fn start_basic_lookahead_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_relay_to_aura_import_queue::<_, AuraId>, - start_lookahead_aura_consensus, - hwbench, - ) - .await +/// Wait for the Aura runtime API to appear on chain. +/// This is useful for chains that started out without Aura. Components that +/// are depending on Aura functionality will wait until Aura appears in the runtime. +async fn wait_for_aura(client: Arc>) +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + let finalized_hash = client.chain_info().finalized_hash; + if client.runtime_api().has_aura_api(finalized_hash) { + return; + }; + + let mut stream = client.finality_notification_stream(); + while let Some(notification) = stream.next().await { + if client.runtime_api().has_aura_api(notification.hash) { + return; + } + } } -/// Start a parachain node for Rococo Contracts. -pub async fn start_contracts_rococo_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_contracts_rpc_extensions, - build_aura_import_queue, - start_lookahead_aura_consensus, - hwbench, - ) - .await +/// Start consensus using the lookahead aura collator. 
+pub(crate) struct StartLookaheadAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl StartConsensus + for StartLookaheadAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface, + code_hash_provider: { + let client = client.clone(); + move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + } + }, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer: Proposer::new(proposer_factory), + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: false, + }; + + let fut = async move { + wait_for_aura(client).await; + aura::run::::Pair, _, _, _, _, _, _, _, _>(params).await; + }; + task_manager.spawn_essential_handle().spawn("aura", None, fut); + + Ok(()) + } } /// Checks that the hardware meets the requirements and print a warning otherwise. 
@@ -1047,3 +810,177 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { ); } } + +type SyncCmdResult = sc_cli::Result<()>; + +type AsyncCmdResult<'a> = + sc_cli::Result<(Pin + 'a>>, TaskManager)>; + +pub(crate) trait DynNodeSpec { + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_>; + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult; + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult; + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult; + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>>; +} + +impl DynNodeSpec for T +where + T: NodeSpec, +{ + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.database)), partial.task_manager)) + } + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.chain_spec)), partial.task_manager)) + } + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.backend, None)), partial.task_manager)) + } + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult { + let partial = 
Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + let db = partial.backend.expose_db(); + let storage = partial.backend.expose_storage(); + + cmd.run(config, partial.client, db, storage) + } + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> { + match parachain_config.network.network_backend { + sc_network::config::NetworkBackendType::Libp2p => + ::start_node::>( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + sc_network::config::NetworkBackendType::Litep2p => + ::start_node::( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + } + } +} diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index ef96f334d63753c73de669ddcd98b6868a88389b..f17c2035edd66a46be4810fa470357c1ccd3fdbd 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -10,17 +10,17 @@ description = "Core primitives for Aura in Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-api = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-core-primitives = { workspace = true } +polkadot-primitives = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 595aa5f72bf2453edea23e372865de95e9e46699..f41213e9485e2509965a25ce556f687a185bba9a 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -10,20 +10,20 @@ description = "Cumulus related core primitive types and traits" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } -polkadot-primitives = { 
path = "../../../polkadot/primitives", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +polkadot-core-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +xcm = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 0156eb02e2b4aaa9ee02e4e237f305c20792569d..c07fe07545614c375634027df171948dbdb95285 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -10,20 +10,20 @@ license = "Apache-2.0" workspace = true [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true, default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true, default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { optional = true, workspace = true } +sp-state-machine = { optional = true, workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index dd584ce86b2e3172563848f028730709e1b1600d..e61c865d05fb07aaff931f0ecd59a09c2592a0fa 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -10,14 +10,14 @@ license = "Apache-2.0" workspace = true [dependencies] -sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } +sp-trie = { workspace = true } [dev-dependencies] -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +sp-state-machine = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 
bdfb83ad72a96930c1dae2d2c054a2c19c5cfcb2..af32fb68d8bb20e6c61b2065ddfe948c98bd64a6 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -10,24 +10,24 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -cumulus-primitives-core = { path = "../core", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../proof-size-hostfunction", default-features = false } -docify = "0.2.8" +cumulus-primitives-core = { workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true } +docify = { workspace = true } [dev-dependencies] -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -cumulus-test-runtime = { path = "../../test/runtime" } +sp-trie = { workspace = true } +sp-io = { workspace = true } +cumulus-test-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index c09c12d7a0abf8ac3c64974390d47880af6ec6e9..35fa334f51c6996c48825cec03a0a90df5b0acbe 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -201,7 +201,7 @@ mod tests { use super::*; use frame_support::{ assert_ok, - dispatch::DispatchClass, + dispatch::{DispatchClass, PerDispatchClass}, weights::{Weight, WeightMeter}, }; use frame_system::{BlockWeight, CheckWeight}; @@ -215,7 +215,7 @@ mod tests { pages: 0u64, }); const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 0; + const LEN: usize = 150; pub fn new_test_ext() -> sp_io::TestExternalities { let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() @@ -256,6 +256,10 @@ mod tests { }); } + fn get_storage_weight() -> PerDispatchClass { + BlockWeight::::get() + } + #[test] fn basic_refund() { // The real cost will be 100 bytes of storage size @@ -268,6 +272,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Should add 500 + 150 (len) to weight. 
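// Editor's note, not part of the patch: the numeric comments in these tests all
// follow one accounting rule, sketched here as a self-contained helper. The
// names are illustrative: `initial` is the proof size already tracked in
// `BlockWeight`, `len` the encoded extrinsic length (here LEN == 150), and
// `consumed` the proof size measured by the recorder. `CheckWeight::do_pre_dispatch`
// first adds the benchmarked proof size plus `len`; `StorageWeightReclaim::post_dispatch`
// then replaces the benchmarked part with the measured one (assuming the recorder
// is enabled and `post_info` carries no `actual_weight` refund).
fn expected_total_proof_size(initial: u64, len: u64, consumed: u64) -> u64 {
    initial + len + consumed
}
// For `basic_refund`: with an initial block proof size of 1000, LEN == 150 and a
// measured cost of 100 bytes, expected_total_proof_size(1000, 150, 100) == 1250,
// which is what the final assertion in this test checks.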
+ assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -283,7 +290,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 600); + assert_eq!(get_storage_weight().total().proof_size(), 1250); }) } @@ -299,6 +306,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -313,7 +323,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + assert_eq!(get_storage_weight().total().proof_size(), 1650); }) } @@ -327,6 +337,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Weight added should be 100 + 150 (len) + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -342,7 +355,10 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1100); + assert_eq!( + get_storage_weight().total().proof_size(), + 1100 + LEN as u64 + info.weight.proof_size() + ); }) } @@ -354,6 +370,8 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -368,7 +386,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 0); + // Proof size should be exactly equal to extrinsic length + assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); }); } @@ -382,12 +401,17 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight, total weight is 1950 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(300)); + // Refund 500 unspent weight according to `post_info`, total weight is now 1650 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Recorded proof size is negative -200, total weight is now 1450 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -396,7 +420,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 800); + assert_eq!(get_storage_weight().total().proof_size(), 1450); }); } @@ -416,6 +440,9 @@ mod tests { pays_fee: Default::default(), }; + // Should add 300 + 150 (len) of weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -432,7 +459,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Reclaimed 100 + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -451,6 +479,9 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = 
StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -458,7 +489,56 @@ mod tests { // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. + + // Refunds unspent 25 weight according to `post_info`, 1175 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) + } + + #[test] + fn test_nothing_relcaimed() { + let mut test_ext = setup_test_externalities(&[100, 200]); + + test_ext.execute_with(|| { + set_current_storage_weight(0); + // Benchmarked storage weight: 100 + let info = DispatchInfo { weight: Weight::from_parts(100, 100), ..Default::default() }; + + // Actual proof size is 100 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 100)), + pays_fee: Default::default(), + }; + + // Adds benchmarked weight 100 + 150 (len), total weight is now 250 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + // Weight should go up by 150 len + 100 proof size weight, total weight 250 + assert_eq!(get_storage_weight().total().proof_size(), 250); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + // Should return `setup_test_externalities` proof recorder value: 100. + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + // Nothing to refund, unspent is 0, total weight 250 + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, LEN, &Ok(()))); + // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic + // actually used 100 proof size. + // Nothing to refund or add, weight matches proof recorder assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -467,7 +547,9 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + // Check block len weight was not reclaimed: + // 100 weight + 150 extrinsic len == 250 proof size + assert_eq!(get_storage_weight().total().proof_size(), 250); }) } @@ -487,11 +569,15 @@ mod tests { pays_fee: Default::default(), }; + // Adds 300 + 150 (len) weight, total weight 1450 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // This refunds 100 - 50(unspent), total weight is now 1400 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -504,7 +590,8 @@ mod tests { // we always need to call `post_dispatch` to verify that they interoperate correctly. 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Above call refunds 50 (unspent), total weight is 1350 now + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -523,11 +610,15 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight is 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // Adds additional 150 weight recorded assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -540,7 +631,7 @@ mod tests { // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -644,7 +735,7 @@ mod tests { // We reclaimed 3 bytes of storage size! assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(get_storage_weight().total().proof_size(), 10); assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); } } diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 7a6f4787ba3121cf0c9c7eec3b9f3794c870037d..f7bf53a9d7d7cb4180f7afd9bc0c574e89675513 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -10,16 +10,16 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.28" +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } # Substrate -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false } +sp-inherents = { workspace = true } +sp-std = { workspace = true } +sp-timestamp = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 85e3ac2f7606c9e12aa4d38f0c44c3fe1818b107..a2fa2dd9806dca4743d13215ae4a30b7738567b7 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -10,25 +10,25 @@ description = "Helper datatypes for Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -pallet-asset-conversion = { path = "../../../substrate/frame/asset-conversion", default-features = false } +frame-support = { workspace = true } +sp-io = { 
workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 120983eb9390e9007c13b08c763f13446011f98e..fbbaab73ce7695c62cfbe1e6ce62d67ba5a68509 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -9,43 +9,43 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sc-service = { path = "../../../substrate/client/service" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-executor-common = { path = "../../../substrate/client/executor/common" } -substrate-test-client = { path = "../../../substrate/test-utils/client" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -pallet-balances = { path = "../../../substrate/frame/balances" } +sc-service = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true 
} +sp-core = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-test-runtime = { path = "../runtime" } -cumulus-test-service = { path = "../service" } -cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } +cumulus-test-runtime = { workspace = true } +cumulus-test-service = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index d233ad2691768c0c1d563c3a0f4c62b44f4c9b23..f26413e441e72b7fca558043f0507de91db4746f 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -79,6 +79,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { cumulus_test_service::chain_spec::get_chain_spec_with_extra_endowed( None, self.endowed_accounts.clone(), + cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!"), ) .build_storage() .expect("Builds test runtime genesis storage") diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index d775c61f7801e98b4c8e8436eb95c1ec86854d77..454266c90ba74008bbe33d65891e16fb516737e4 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -10,19 +10,19 @@ description = "Mocked relay state proof builder for testing Cumulus." 
workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-trie = { workspace = true } +sp-std = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index b14e3b7f040e74cfabe834d770859433cdb1553d..d5582f2d2a23b28832a5731e1f0ae30ede21d266 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -9,48 +9,48 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } 
-sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-sudo = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-glutton = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-session = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-std = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } -cumulus-primitives-aura = { path = "../../primitives/aura", default-features = false } -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +parachain-info = { workspace = true } +cumulus-primitives-aura = { workspace = true } +pallet-collator-selection = { workspace = true } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -93,3 +93,4 @@ std = [ "substrate-wasm-builder", ] increment-spec-version = [] +elastic-scaling = [] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index ebd5c178cba07e2889b6501a9be490344467d228..bf579f4121e5f6b48fa5ba310c76f7cdd02fa7a0 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -24,6 +24,13 @@ fn main() { .enable_feature("increment-spec-version") .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); + + WasmBuilder::new() + .with_current_project() + .enable_feature("elastic-scaling") + .import_memory() + .set_file_name("wasm_binary_elastic_scaling.rs") + 
.build(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 452b3241d0bfa4d374e3af77ad15dff8aa15a250..97cb02ab779effdffedac4b4b1bebfe3e5970e5b 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -27,6 +27,11 @@ pub mod wasm_spec_version_incremented { include!(concat!(env!("OUT_DIR"), "/wasm_binary_spec_version_incremented.rs")); } +pub mod elastic_scaling { + #[cfg(feature = "std")] + include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); +} + mod test_pallet; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; @@ -66,7 +71,7 @@ use frame_system::{ pub use pallet_balances::Call as BalancesCall; pub use pallet_glutton::Call as GluttonCall; pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_timestamp::{Call as TimestampCall, Now}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; @@ -83,8 +88,23 @@ impl_opaque_keys! { /// The para-id used in this runtime. pub const PARACHAIN_ID: u32 = 100; -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +#[cfg(not(feature = "elastic-scaling"))] +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 4; +#[cfg(not(feature = "elastic-scaling"))] const BLOCK_PROCESSING_VELOCITY: u32 = 1; + +#[cfg(feature = "elastic-scaling")] +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 7; +#[cfg(feature = "elastic-scaling")] +const BLOCK_PROCESSING_VELOCITY: u32 = 4; + +#[cfg(not(feature = "elastic-scaling"))] +pub const MILLISECS_PER_BLOCK: u64 = 6000; +#[cfg(feature = "elastic-scaling")] +pub const MILLISECS_PER_BLOCK: u64 = 2000; + +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; // The only difference between the two declarations below is the `spec_version`. With the @@ -126,10 +146,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -pub const MILLISECS_PER_BLOCK: u64 = 6000; - -pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; // These time units are defined in number of blocks. @@ -499,7 +515,7 @@ impl_runtime_apis! 
{ impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 732d884528f8974b4d09b33c118f41b74d22f348..f766d123632096a4af578fcf87bdb0b299008028 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,96 +13,94 @@ name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.79" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -criterion = { version = "0.5.1", features = ["async_tokio"] } -jsonrpsee = { version = "0.22", features = ["server"] } -rand = "0.8.5" +async-trait = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +criterion = { features = ["async_tokio"], workspace = true, default-features = true } +jsonrpsee = { features = ["server"], workspace = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.32.0", features = ["macros"] } -tracing = "0.1.37" -url = "2.4.0" -tempfile = "3.8.0" +tokio = { features = ["macros"], workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +url = { workspace = true } +tempfile = { workspace = true } # Substrate -frame-system = { path = "../../../substrate/frame/system" } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -substrate-test-client = { path = "../../../substrate/test-utils/client" } -sc-cli = { path = "../../../substrate/client/cli" } 
-sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-executor-wasmtime = { path = "../../../substrate/client/executor/wasmtime" } -sc-executor-common = { path = "../../../substrate/client/executor/common" } +frame-system = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-service = { path = "../../../polkadot/node/service" } -polkadot-test-service = { path = "../../../polkadot/node/test/service" } -polkadot-cli = { path = "../../../polkadot/cli" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../../client/cli" } -parachains-common = { path = "../../parachains/common" } -cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } -cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } -cumulus-client-parachain-inherent = { path = "../../client/parachain-inherent" } 
-cumulus-client-service = { path = "../../client/service" } -cumulus-client-collator = { path = "../../client/collator" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-inprocess-interface = { path = "../../client/relay-chain-inprocess-interface" } -cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } -cumulus-test-runtime = { path = "../runtime" } -cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } -cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } -cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } -pallet-timestamp = { path = "../../../substrate/frame/timestamp" } +cumulus-client-cli = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } +cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.28" -portpicker = "0.1.1" -rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -cumulus-test-client = { path = "../client" } +futures = { workspace = true } +portpicker = { workspace = true } +sp-authority-discovery = { workspace = true, default-features = true } +cumulus-test-client = { workspace = true } # Polkadot dependencies -polkadot-test-service = { path = "../../../polkadot/node/test/service" } +polkadot-test-service = { workspace = true } # Substrate dependencies -sc-cli = { path = "../../../substrate/client/cli" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } [features] runtime-benchmarks = [ @@ -116,7 +114,6 @@ runtime-benchmarks = [ "polkadot-primitives/runtime-benchmarks", "polkadot-service/runtime-benchmarks", "polkadot-test-service/runtime-benchmarks", - "rococo-parachain-runtime/runtime-benchmarks", 
"sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 174d478f2575caae3f6afe14810c8fe7f6719cb8..ae71028ad486a46f32347f925e645ff0980bd652 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -66,9 +66,10 @@ where pub fn get_chain_spec_with_extra_endowed( id: Option, extra_endowed_accounts: Vec, + code: &[u8], ) -> ChainSpec { ChainSpec::builder( - cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + code, Extensions { para_id: id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()).into() }, ) .with_name("Local Testnet") @@ -83,7 +84,21 @@ pub fn get_chain_spec_with_extra_endowed( /// Get the chain spec for a specific parachain ID. pub fn get_chain_spec(id: Option) -> ChainSpec { - get_chain_spec_with_extra_endowed(id, Default::default()) + get_chain_spec_with_extra_endowed( + id, + Default::default(), + cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + ) +} + +/// Get the chain spec for a specific parachain ID. +pub fn get_elastic_scaling_chain_spec(id: Option) -> ChainSpec { + get_chain_spec_with_extra_endowed( + id, + Default::default(), + cumulus_test_runtime::elastic_scaling::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) } /// Local testnet genesis for testing. diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index 87d1d4af8a95e0edf12efc454d5505a6c1ad7544..37ca27542cbfe0cd0d93ba7eb6a6dfb7a05459c0 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -50,6 +50,12 @@ pub struct TestCollatorCli { #[arg(long)] pub fail_pov_recovery: bool, + + /// EXPERIMENTAL: Use slot-based collator which can handle elastic scaling. + /// + /// Use with care, this flag is unstable and subject to change. 
+ #[arg(long)] + pub experimental_use_slot_based: bool, } #[derive(Debug, clap::Subcommand)] @@ -253,8 +259,16 @@ impl SubstrateCli for TestCollatorCli { fn load_spec(&self, id: &str) -> std::result::Result, String> { Ok(match id { - "" => - Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_>, + "" => { + tracing::info!("Using default test service chain spec."); + Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_> + }, + "elastic-scaling" => { + tracing::info!("Using elastic-scaling chain spec."); + Box::new(cumulus_test_service::get_elastic_scaling_chain_spec(Some(ParaId::from( + 2100, + )))) as Box<_> + }, path => { let chain_spec = cumulus_test_service::chain_spec::ChainSpec::from_json_file(path.into())?; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 6f8b9d19bb29ba7445b2b67fdd9b0ac4a3263553..51cdebbaf54e036172625682648afd5fd9893d84 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -25,7 +25,10 @@ pub mod chain_spec; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ - collators::lookahead::{self as aura, Params as AuraParams}, + collators::{ + lookahead::{self as aura, Params as AuraParams}, + slot_based::{self as slot_based, Params as SlotBasedParams}, + }, ImportQueueParams, }; use cumulus_client_consensus_proposer::Proposer; @@ -45,7 +48,7 @@ use cumulus_client_cli::{CollatorOptions, RelayChainMode}; use cumulus_client_consensus_common::{ ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, }; -use cumulus_client_pov_recovery::RecoveryHandle; +use cumulus_client_pov_recovery::{RecoveryDelayRange, RecoveryHandle}; #[allow(deprecated)] use cumulus_client_service::old_consensus; use cumulus_client_service::{ @@ -304,7 +307,7 @@ async fn build_relay_chain_interface( /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with(parachain_config.network.node_name.as_str())] +#[sc_tracing::logging::prefix_logs_with("Parachain")] pub async fn start_node_impl>( parachain_config: Configuration, collator_key: Option, @@ -316,6 +319,7 @@ pub async fn start_node_impl>( consensus: Consensus, collator_options: CollatorOptions, proof_recording_during_import: bool, + use_slot_based_collator: bool, ) -> sc_service::error::Result<( TaskManager, Arc, @@ -409,7 +413,6 @@ where } else { Box::new(overseer_handle.clone()) }; - let is_collator = collator_key.is_some(); let relay_chain_slot_duration = Duration::from_secs(6); start_relay_chain_tasks(StartRelayChainTasksParams { @@ -418,11 +421,11 @@ where para_id, relay_chain_interface: relay_chain_interface.clone(), task_manager: &mut task_manager, - da_recovery_profile: if is_collator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, + // Increase speed of recovery for testing purposes. 
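+		// Collators and full nodes alike use this fixed range, rather than the default `Collator`/`FullNode` recovery profiles.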
+ da_recovery_profile: DARecoveryProfile::Other(RecoveryDelayRange { + min: Duration::from_secs(1), + max: Duration::from_secs(5), + }), import_queue: import_queue_service, relay_chain_slot_duration, recovery_handle, @@ -461,29 +464,72 @@ where ); let client_for_aura = client.clone(); - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle: sync_service, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(2000), - reinitialize: false, - }; - let fut = aura::run::(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + if use_slot_based_collator { + tracing::info!(target: LOG_TARGET, "Starting block authoring with slot based authoring."); + let params = SlotBasedParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura + .code_at(block_hash) + .ok() + .map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + slot_drift: Duration::from_secs(1), + }; + + let (collation_future, block_builer_future) = + slot_based::run::(params); + task_manager.spawn_essential_handle().spawn( + "collation-task", + None, + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + None, + block_builer_future, + ); + } else { + tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura + .code_at(block_hash) + .ok() + .map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + }; + + let fut = aura::run::(params); + task_manager.spawn_essential_handle().spawn("aura", None, fut); + } } } @@ -720,6 +766,7 @@ impl TestNodeBuilder { self.consensus, collator_options, self.record_proof_during_import, + false, ) .await .expect("could not create Cumulus test service"), @@ -735,6 +782,7 @@ impl TestNodeBuilder { self.consensus, collator_options, self.record_proof_during_import, + false, ) .await .expect("could not create Cumulus test service"), @@ -766,8 +814,11 @@ pub fn node_config( let root = base_path.path().join(format!("cumulus_test_service_{}", key)); let role = if is_collator { Role::Authority } else { Role::Full }; let key_seed = key.to_seed(); - let mut spec = - Box::new(chain_spec::get_chain_spec_with_extra_endowed(Some(para_id), endowed_accounts)); + let mut spec = Box::new(chain_spec::get_chain_spec_with_extra_endowed( + Some(para_id), + endowed_accounts, + cumulus_test_runtime::WASM_BINARY.expect("WASM 
binary was not built, please build it!"), + )); let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage"); diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 90d37173dd5907ebf1f4a136a284ef50dda816e8..9357978b769a4e8ec6c4f850d37290e3dee1b1e5 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -118,6 +118,7 @@ fn main() -> Result<(), sc_cli::Error> { consensus, collator_options, true, + cli.experimental_use_slot_based, ) .await, sc_network::config::NetworkBackendType::Litep2p => @@ -135,6 +136,7 @@ fn main() -> Result<(), sc_cli::Error> { consensus, collator_options, true, + cli.experimental_use_slot_based, ) .await, } diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 0ed77bf5b7073bc9e3041388206203ce7c1829a2..ba1097fba075696e9d40dffc3c4753d9956615b1 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -10,36 +10,36 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -paste = "1.0.14" +codec = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } log = { workspace = true } -lazy_static = "1.4.0" -impl-trait-for-tuples = "0.2.2" +lazy_static = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../substrate/frame/message-queue" } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } -parachains-common = { path = "../../parachains/common" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent 
= { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm" } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index b05285c87bff5a69312552d13b9b652bbd9d1bc1..dc7095ced252de497afd0fcd2cedadf460b412d1 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -2,7 +2,9 @@ Description: PoV recovery test Network: ./0002-pov_recovery.toml Creds: config -# wait 20 blocks and register parachain +# Wait 20 blocks and register parachain. This part is important for pov-recovery. +# We need to make sure that the recovering node is able to see all relay-chain +# notifications containing the candidates to recover. validator-3: reports block height is at least 20 within 250 seconds validator-0: js-script ./register-para.js with "2000" within 240 seconds validator-0: parachain 2000 is registered within 300 seconds diff --git a/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl b/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl index 49b6d9e94fd16d73ce7de3cf102fc2bfad1a6e76..e1e8442f30509c72c49673288920e6d5336a8bfb 100644 --- a/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl +++ b/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl @@ -6,3 +6,6 @@ alice: parachain 2000 is registered within 225 seconds dave: reports block height is at least 7 within 250 seconds eve: reports block height is at least 7 within 250 seconds ferdie: reports block height is at least 7 within 250 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +charlie: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git a/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl b/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl index 7da8416d0161a23f84184d288cb14fdcd3094fd5..b14c15ed5e5b90f8fed878c5c8c82eefbe71b624 100644 --- a/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl +++ b/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl @@ -13,3 +13,7 @@ two: restart after 1 seconds three: restart after 20 seconds dave: is up dave: reports block height is at least 30 within 200 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +dave: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds +eve: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git 
a/cumulus/zombienet/tests/0008-elastic_authoring.toml b/cumulus/zombienet/tests/0008-elastic_authoring.toml new file mode 100644 index 0000000000000000000000000000000000000000..f2e2010a9e4582feefaebdaa355ab96b6a8f7695 --- /dev/null +++ b/cumulus/zombienet/tests/0008-elastic_authoring.toml @@ -0,0 +1,50 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 6 + allowed_ancestry_len = 3 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = ["" ] + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=trace" ] + count = 8 + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2100 +chain = "elastic-scaling" +add_to_genesis = true + + [[parachains.collators]] + name = "collator-elastic" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] + +# Slot based authoring with 1 core and 6s slot duration +[[parachains]] +id = 2000 +add_to_genesis = true + + [[parachains.collators]] + name = "collator-single-core" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.zndsl b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..a06ffd24fefd2bab46de4c80b210959a62660da1 --- /dev/null +++ b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl @@ -0,0 +1,19 @@ +Description: Slot based authoring for elastic scaling +Network: ./0008-elastic_authoring.toml +Creds: config + +alice: is up +collator-elastic: is up +collator-single-core: is up + + +# configure relay chain +alice: js-script ./assign-core.js with "2100,0" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "2100,1" return is 0 within 600 seconds + +collator-single-core: reports block height is at least 20 within 225 seconds +collator-elastic: reports block height is at least 40 within 225 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +collator-elastic: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds +collator-single-core: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml new file mode 100644 index 0000000000000000000000000000000000000000..b695f8aa937653340534d9366a5eab210888e416 --- /dev/null +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 + 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 6 + allowed_ancestry_len = 3 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = ["" ] + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=trace", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] + count = 8 + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2100 +chain = "elastic-scaling" +add_to_genesis = false + + # Slot based authoring with 3 cores and 2s slot duration + [[parachains.collators]] + name = "collator-elastic" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["--disable-block-announcements", "-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] + + # run 'recovery-target' as a parachain full node + [[parachains.collators]] + name = "recovery-target" + validator = false # full node + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'collator-elastic'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..5cca6120ff3a37dbff4d3d93c8ad977e5404f645 --- /dev/null +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl @@ -0,0 +1,24 @@ +Description: Elastic scaling PoV recovery test +Network: ./0009-elastic_pov_recovery.toml +Creds: config + +alice: is up +collator-elastic: is up + +# configure relay chain +alice: js-script ./assign-core.js with "2100,0" return is 0 within 200 seconds +alice: js-script ./assign-core.js with "2100,1" return is 0 within 200 seconds + +# Wait 20 blocks and register parachain. This part is important for pov-recovery. +# We need to make sure that the recovering node is able to see all relay-chain +# notifications containing the candidates to recover. 
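+# The para is not part of genesis in this network (`add_to_genesis = false`), so it is registered below via register-para.js.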
+alice: reports block height is at least 20 within 250 seconds +alice: js-script ./register-para.js with "2100" within 240 seconds +alice: parachain 2100 is registered within 300 seconds + + +# check block production +collator-elastic: reports block height is at least 40 within 225 seconds +collator-elastic: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds + +recovery-target: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 35 within 10 seconds diff --git a/cumulus/zombienet/tests/assign-core.js b/cumulus/zombienet/tests/assign-core.js new file mode 100644 index 0000000000000000000000000000000000000000..4179b68b2e3cb139bb48d937b472258e943ddce2 --- /dev/null +++ b/cumulus/zombienet/tests/assign-core.js @@ -0,0 +1,46 @@ +// Assign a parachain to a core. +// +// First argument should be the parachain id. +// Second argument should be the core. +async function run(nodeName, networkInfo, args) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + let para = Number(args[0]); + let core = Number(args[1]); + console.log(`Assigning para ${para} to core ${core}`); + + await zombie.util.cryptoWaitReady(); + + // Submit transaction with Alice accoung + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + // Wait for this transaction to be finalized in a block. + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, 57600]], null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 196ba861f503c0fc82b6eb0e428df600ce6bfd49..634b9f1882917bbb0f776893242344fe9977b0ef 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0 +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.5 # metadata ARG VCS_REF diff --git a/docs/contributor/CONTRIBUTING.md b/docs/contributor/CONTRIBUTING.md index 96dc86e9780561e33e24fbc6f0346572d26598b7..7d54b2681b41329c15ab4e3c25f37943ecf0d6a7 100644 --- a/docs/contributor/CONTRIBUTING.md +++ b/docs/contributor/CONTRIBUTING.md @@ -4,18 +4,16 @@ The `Polkadot SDK` project is an **OPENISH Open Source Project** ## What? -Individuals making significant and valuable contributions are given commit-access to the project. -Contributions are done via pull-requests and need to be approved by the maintainers. +Individuals making significant and valuable contributions are given commit-access to the project. Contributions are done +via pull-requests and need to be approved by the maintainers. 
## Rules There are a few basic ground-rules for contributors (including the maintainer(s) of the project): -1. **No `--force` pushes** or modifying the master branch history in any way. - If you need to rebase, ensure you do it in your own repo. No rewriting of the history - after the code has been shared (e.g. through a Pull-Request). -2. **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be - used for ongoing work. +1. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in + your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). +2. **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. 3. **All modifications** must be made in a **pull-request** to solicit feedback from other contributors. 4. A pull-request **must not be merged until CI** has finished successfully. 5. Contributors should adhere to the [house coding style](./STYLE_GUIDE.md). @@ -25,12 +23,10 @@ There are a few basic ground-rules for contributors (including the maintainer(s) ### In General -A Pull Request (PR) needs to be reviewed and approved by project maintainers. -If a change does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged -`A1-insubstantial` and merged faster. -If it is an urgent fix with no large change to logic, then it may be merged after a non-author -contributor has reviewed it well and approved the review once CI is complete. -No PR should be merged until all reviews' comments are addressed. +* A Pull Request (PR) needs to be reviewed and approved by project maintainers. +* If a change does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged `A1-insubstantial` and +merged faster. +* No PR should be merged until all reviews' comments are addressed. ### Labels @@ -38,39 +34,26 @@ The set of labels and their description can be found [here](https://paritytech.g ### Process -1. Please use our [Pull Request Template](./PULL_REQUEST_TEMPLATE.md) and make sure all relevant - information is reflected in your PR. -2. Please tag each PR with minimum one `T*` label. The respective `T*` labels should signal the - component that was changed, they are also used by downstream users to track changes and to - include these changes properly into their own releases. -3. If youโ€™re still working on your PR, please submit as โ€œDraftโ€. Once a PR is ready for review change - the status to โ€œOpenโ€, so that the maintainers get to review your PR. Generally PRs should sit for - 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -4. If youโ€™re introducing a major change, that might impact the documentation please add the label - `T13-documentation`. The docs team will get in touch. -5. If your PR changes files in these paths: - - `polkadot` : `^runtime/polkadot` - `polkadot` : `^runtime/kusama` - `polkadot` : `^primitives/src/` - `polkadot` : `^runtime/common` - `substrate` : `^frame/` - `substrate` : `^primitives/` - - It should be added to the [security audit board](https://github.com/orgs/paritytech/projects/103) - and will need to undergo an audit before merge. -6. PRs will be able to be merged once all reviewers' comments are addressed and CI is successful. 
- -**Noting breaking changes:** -When breaking APIs, the PR description should mention what was changed alongside some examples on how -to change the code to make it work/compile. -It should also mention potential storage migrations and if they require some special setup aside adding -it to the list of migrations in the runtime. +1. Please use our [Pull Request Template](./PULL_REQUEST_TEMPLATE.md) and make sure all relevant information is + reflected in your PR. +2. Please tag each PR with minimum one `T*` label. The respective `T*` labels should signal the component that was + changed, they are also used by downstream users to track changes and to include these changes properly into their own + releases. +3. If youโ€™re still working on your PR, please submit as โ€œDraftโ€. Once a PR is ready for review change the status to + โ€œOpenโ€, so that the maintainers get to review your PR. Generally PRs should sit for 48 hours in order to garner + feedback. It may be merged before if all relevant parties had a look at it. +4. With respect to auditing, please see [AUDIT.md](../AUDIT.md). In general, merging to master can happen independent of + audit. +5. PRs will be able to be merged once all reviewers' comments are addressed and CI is successful. + +**Noting breaking changes:** When breaking APIs, the PR description should mention what was changed alongside some +examples on how to change the code to make it work/compile. It should also mention potential storage migrations and if +they require some special setup aside adding it to the list of migrations in the runtime. ## Reviewing pull requests -When reviewing a pull request, the end-goal is to suggest useful changes to the author. -Reviews should finish with approval unless there are issues that would result in: +When reviewing a pull request, the end-goal is to suggest useful changes to the author. Reviews should finish with +approval unless there are issues that would result in: 1. Buggy behavior. 2. Undue maintenance burden. 3. Breaking with house coding style. @@ -80,18 +63,17 @@ Reviews should finish with approval unless there are issues that would result in The reviewers are also responsible to check: -1. if a changelog is necessary and attached -1. the quality of information in the changelog file -1. the PR has an impact on docs -1. that the docs team was included in the review process of a docs update +* if the PR description is well written to facilitate integration, in case it contains breaking changes. +* the PR has an impact on docs. **Reviews may not be used as an effective veto for a PR because**: 1. There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix. 2. It does not fit well with some other contributors' longer-term vision for the project. -## Documentation +## `PRDoc` -All Pull Requests must contain proper title & description. +All Pull Requests must contain proper title & description, as described in [Pull Request +Template](./PULL_REQUEST_TEMPLATE.md). Moreover, all pull requests must have a proper `prdoc` file attached. Some Pull Requests can be exempt of `prdoc` documentation, those must be labelled with [`R0-silent`](https://github.com/paritytech/labels/blob/main/ruled_labels/specs_polkadot-sdk.yaml#L89-L91). @@ -102,46 +84,49 @@ See more about `prdoc` [here](./prdoc.md) ## Helping out -We use [labels](https://github.com/paritytech/polkadot-sdk/labels) to manage PRs and issues and communicate -state of a PR. Please familiarise yourself with them. 
Best way to get started is to a pick a ticket tagged -[easy](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD0-easy) -or [medium](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD1-medium) -and get going. Alternatively, look out for issues tagged [mentor](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AC1-mentor) -and get in contact with the mentor offering their support on that larger task. +We use [labels](https://github.com/paritytech/polkadot-sdk/labels) to manage PRs and issues and communicate state of a +PR. Please familiarise yourself with them. Best way to get started is to pick a ticket tagged +[easy](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD0-easy) or +[medium](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD1-medium) and get going. +Alternatively, look out for issues tagged +[mentor](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AC1-mentor) and get in contact +with the mentor offering their support on that larger task. **** ### Issues If what you are looking for is an answer rather than proposing a new feature or fix, search -[https://substrate.stackexchange.com](https://substrate.stackexchange.com/) to see if an post already -exists, and ask if not. Please do not file support issues here. -Before opening a new issue search to see if a similar one already exists and leave a comment that you -also experienced this issue or add your specifics that are related to an existing issue. -Please label issues with the following labels: +[https://substrate.stackexchange.com](https://substrate.stackexchange.com/) to see if a post already exists, and ask if +not. Please do not file support issues here. + +Before opening a new issue search to see if a similar one already exists and leave a comment that you also experienced +this issue or add your specifics that are related to an existing issue. + +Please label issues with the following labels (only relevant for maintainer): 1. `I*` issue severity and type. EXACTLY ONE REQUIRED. 2. `D*` issue difficulty, suggesting the level of complexity this issue has. AT MOST ONE ALLOWED. 3. `T*` Issue topic. MULTIPLE ALLOWED. ## Releases -Declaring formal releases remains the prerogative of the project maintainer(s). +Declaring formal releases remains the prerogative of the project maintainer(s). See [RELEASE.md](../RELEASE.md). ## UI tests -UI tests are used for macros to ensure that the output of a macro doesn’t change and is in the expected format. -These UI tests are sensible to any changes in the macro generated code or to switching the rust stable version. -The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is for example complaining -about failing UI tests and it is expected that they fail these tests need to be executed locally. -To simplify the updating of the UI test output there is a script -- `./scripts/update-ui-tests.sh` to update the tests for a current rust version locally -- `./scripts/update-ui-tests.sh 1.70` # to update the tests for a specific rust version locally +UI tests are used for macros to ensure that the output of a macro doesn’t change and is in the expected format. These UI +tests are sensitive to any changes in the macro generated code or to switching the rust stable version. The tests are +only run when the `RUN_UI_TESTS` environment variable is set.
So, when the CI is for example complaining about failing +UI tests and it is expected that they fail these tests need to be executed locally. To simplify the updating of the UI +test output there is a script +* `./scripts/update-ui-tests.sh` to update the tests for a current rust version locally +* `./scripts/update-ui-tests.sh 1.70` # to update the tests for a specific rust version locally Or if you have opened PR and you're member of `paritytech` - you can use command-bot to run the tests for you in CI: -- `bot update-ui` - will run the tests for the current rust version -- `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version -- `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - -will run the tests for the specified rust version and specified image +* `bot update-ui` - will run the tests for the current rust version +* `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version +* `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - will run +the tests for the specified rust version and specified image ## Feature Propagation @@ -157,4 +142,5 @@ Start with comment in PR: `bot help` to see the list of available commands. ## Deprecating code When deprecating and removing code you need to be mindful of how this could impact downstream developers. In order to -mitigate this impact, it is recommended to adhere to the steps outlined in the [Deprecation Checklist](./DEPRECATION_CHECKLIST.md). +mitigate this impact, it is recommended to adhere to the steps outlined in the [Deprecation +Checklist](./DEPRECATION_CHECKLIST.md). diff --git a/docs/contributor/PULL_REQUEST_TEMPLATE.md b/docs/contributor/PULL_REQUEST_TEMPLATE.md index 79a036a235ad92a2cfceeba9b8fb66a44d163dfd..083b30b4a3567904a62c83dd07a21677b7c0e048 100644 --- a/docs/contributor/PULL_REQUEST_TEMPLATE.md +++ b/docs/contributor/PULL_REQUEST_TEMPLATE.md @@ -2,35 +2,42 @@ โœ„ ----------------------------------------------------------------------------- -Thank you for your Pull Request! ๐Ÿ™ Please make sure it follows the contribution guidelines outlined in -[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill -out the sections below. Once you're ready to submit your PR for review, please -delete this section and leave only the text under the "Description" heading. +Thank you for your Pull Request! ๐Ÿ™ Please make sure it follows the contribution guidelines outlined in [this +document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill out the +sections below. Once you're ready to submit your PR for review, please delete this section and leave only the text under +the "Description" heading. # Description -*Please include a summary of the changes and the related issue. Please also include relevant motivation and context, -including:* +*A concise description of what your PR is doing, and what potential issue it is solving. Use [Github semantic +linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) +to link the PR to an issue that must be closed once this is merged.* -- What does this PR do? -- Why are these changes needed? -- How were these changes implemented and what do they affect? 
+## Integration -*Use [Github semantic -linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) -to address any open issues this PR relates to or closes.* +*In depth notes about how this PR should be integrated by downstream projects. This part is mandatory, and should be +reviewed by reviewers, if the PR does NOT have the `R0-Silent` label. In case of a `R0-Silent`, it can be ignored.* + +## Review Notes + +*In depth notes about the **implementation** details of your PR. This should be the main guide for reviewers to +understand your approach and effectively review it. If too long, use +[`
<details>
`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/details)*. -Fixes # (issue number, *if applicable*) +*Imagine that someone who is depending on the old code wants to integrate your new code and the only information that +they get is this section. It helps to include example usage and default value here, with a `diff` code-block to show +possibly integration.* -Closes # (issue number, *if applicable*) +*Include your leftover TODOs, if any, here.* # Checklist -- [ ] My PR includes a detailed description as outlined in the "Description" section above -- [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` +* [ ] My PR includes a detailed description as outlined in the "Description" and its two subsections above. +* [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) -- [ ] I have made corresponding changes to the documentation (if applicable) -- [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) + * External contributors: ask maintainers to put the right label on your PR. +* [ ] I have made corresponding changes to the documentation (if applicable) +* [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) You can remove the "Checklist" section once all have been checked. Thank you for your contribution! diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index fe9a96bcafc0053c18332ca5901a93142811977f..37417497e1f84f3a54fb3e55da631c9ce363e700 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,6 +1,6 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] - polkadot[polkadot.network] --> devhub[polkadot_sdk_docs] + polkadot_network[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs @@ -9,5 +9,5 @@ flowchart polkadot_sdk --> substrate polkadot_sdk --> frame polkadot_sdk --> cumulus - polkadot_sdk --> polkadot + polkadot_sdk --> polkadot[polkadot node] polkadot_sdk --> xcm diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index b0671623f48d42dea7f773a4e568aa0d8a103b22..d3e48de5d181911f0686738c6d556e552176fd7a 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -15,95 +15,104 @@ workspace = true [dependencies] # Needed for all FRAME-based code -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } -frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ +codec = { workspace = true } +scale-info = { workspace = true } +frame = { features = [ "experimental", "runtime", -] } -pallet-examples = { path = "../../substrate/frame/examples" } -pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } -pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offchain-worker" } +], workspace = true, default-features = true } +pallet-examples = { workspace = true } +pallet-default-config-example = { workspace = true, default-features = true } +pallet-example-offchain-worker = { workspace = true, default-features = true } # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.8" +docify = { workspace = true } # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
-polkadot-sdk = { path = "../../umbrella", features = ["runtime"] } -node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } -kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } -chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } -subkey = { path = "../../substrate/bin/utils/subkey" } -frame-system = { path = "../../substrate/frame/system", default-features = false } -frame-support = { path = "../../substrate/frame/support", default-features = false } -frame-executive = { path = "../../substrate/frame/executive", default-features = false } -pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } -frame-metadata-hash-extension = { path = "../../substrate/frame/metadata-hash-extension" } +polkadot-sdk = { features = ["runtime"], workspace = true, default-features = true } +node-cli = { workspace = true } +kitchensink-runtime = { workspace = true } +chain-spec-builder = { workspace = true, default-features = true } +subkey = { workspace = true, default-features = true } +frame-system = { workspace = true } +frame-support = { workspace = true } +frame-executive = { workspace = true } +pallet-example-single-block-migrations = { workspace = true, default-features = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } # Substrate Client -sc-network = { path = "../../substrate/client/network" } -sc-rpc-api = { path = "../../substrate/client/rpc-api" } -sc-rpc = { path = "../../substrate/client/rpc" } -sc-client-db = { path = "../../substrate/client/db" } -sc-cli = { path = "../../substrate/client/cli" } -sc-consensus-aura = { path = "../../substrate/client/consensus/aura" } -sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } -sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } -sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } -sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } -sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } -sc-executor = { path = "../../substrate/client/executor" } -sc-service = { path = "../../substrate/client/service" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } +sc-network = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } +sc-consensus-pow = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } -substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" } -cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system" 
} -parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } -cumulus-primitives-proof-size-hostfunction = { path = "../../cumulus/primitives/proof-size-hostfunction" } -cumulus-client-service = { path = "../../cumulus/client/service" } -cumulus-primitives-storage-weight-reclaim = { path = "../../cumulus/primitives/storage-weight-reclaim" } +cumulus-pallet-aura-ext = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +parachain-info = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } # Pallets and FRAME internals -pallet-aura = { path = "../../substrate/frame/aura" } -pallet-timestamp = { path = "../../substrate/frame/timestamp" } -pallet-balances = { path = "../../substrate/frame/balances" } -pallet-assets = { path = "../../substrate/frame/assets" } -pallet-preimage = { path = "../../substrate/frame/preimage" } -pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } -pallet-utility = { path = "../../substrate/frame/utility" } -pallet-multisig = { path = "../../substrate/frame/multisig" } -pallet-proxy = { path = "../../substrate/frame/proxy" } -pallet-authorship = { path = "../../substrate/frame/authorship" } -pallet-collective = { path = "../../substrate/frame/collective" } -pallet-democracy = { path = "../../substrate/frame/democracy" } -pallet-uniques = { path = "../../substrate/frame/uniques" } -pallet-nfts = { path = "../../substrate/frame/nfts" } -pallet-scheduler = { path = "../../substrate/frame/scheduler" } +pallet-aura = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-multisig = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-authorship = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-democracy = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, default-features = true } +pallet-nfts = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +pallet-referenda = { workspace = true, default-features = true } +pallet-broker = { workspace = true, default-features = true } +pallet-babe = { workspace = true, default-features = true } # Primitives -sp-io = { path = "../../substrate/primitives/io" } -sp-api = { path = "../../substrate/primitives/api" } -sp-core = { path = "../../substrate/primitives/core" } -sp-keyring = { path = "../../substrate/primitives/keyring" } -sp-runtime = { path = "../../substrate/primitives/runtime" } 
-sp-arithmetic = { path = "../../substrate/primitives/arithmetic" } -sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder" } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } -# Misc pallet dependencies -pallet-referenda = { path = "../../substrate/frame/referenda" } -pallet-broker = { path = "../../substrate/frame/broker" } -pallet-babe = { path = "../../substrate/frame/babe" } - -sp-offchain = { path = "../../substrate/primitives/offchain" } -sp-version = { path = "../../substrate/primitives/version" } # XCM -xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -xcm-docs = { path = "../../polkadot/xcm/docs" } +xcm = { workspace = true, default-features = true } +xcm-docs = { workspace = true } # runtime guides -chain-spec-guide-runtime = { path = "./src/reference_docs/chain_spec_runtime" } +chain-spec-guide-runtime = { workspace = true } + +# Templates +minimal-template-runtime = { workspace = true } +solochain-template-runtime = { workspace = true } +parachain-template-runtime = { workspace = true } diff --git a/docs/sdk/src/guides/async_backing_guide.rs b/docs/sdk/src/guides/async_backing_guide.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2f4dcabfd29b872de2981cd0a9c5003118c29c0 --- /dev/null +++ b/docs/sdk/src/guides/async_backing_guide.rs @@ -0,0 +1,254 @@ +//! # Upgrade Parachain for Asynchronous Backing Compatibility +//! +//! This guide is relevant for cumulus based parachain projects started in 2023 or before, whose +//! backing process is synchronous where parablocks can only be built on the latest Relay Chain +//! block. Async Backing allows collators to build parablocks on older Relay Chain blocks and create +//! pipelines of multiple pending parablocks. This parallel block generation increases efficiency +//! and throughput. For more information on Async backing and its terminology, refer to the document +//! on [the Polkadot Wiki.](https://wiki.polkadot.network/docs/maintain-guides-async-backing) +//! +//! > If starting a new parachain project, please use an async backing compatible template such as +//! > the +//! > [parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). +//! The rollout process for Async Backing has three phases. Phases 1 and 2 below put new +//! infrastructure in place. Then we can simply turn on async backing in phase 3. +//! +//! ## Prerequisite +//! +//! The relay chain needs to have async backing enabled so double-check that the relay-chain +//! configuration contains the following three parameters (especially when testing locally e.g. with +//! zombienet): +//! +//! ```json +//! "async_backing_params": { +//! "max_candidate_depth": 3, +//! "allowed_ancestry_len": 2 +//! }, +//! "scheduling_lookahead": 2 +//! ``` +//! +//!
<div class="warning">`scheduling_lookahead` must be set to 2, otherwise parachain block times +//! will degrade to worse than with sync backing!</div>
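+//!
+//! As a cross-check only (not one of the upgrade phases below), the same three values can be
+//! expressed in Rust when a test relay chain configuration is assembled in code rather than via a
+//! JSON chain spec. This is a minimal sketch that assumes the `polkadot-primitives` crate and its
+//! `AsyncBackingParams` type are available to the test code:
+//!
+//! ```ignore
+//! use polkadot_primitives::AsyncBackingParams;
+//!
+//! // Mirrors `async_backing_params` from the JSON snippet above.
+//! const ASYNC_BACKING_PARAMS: AsyncBackingParams =
+//!     AsyncBackingParams { max_candidate_depth: 3, allowed_ancestry_len: 2 };
+//!
+//! // Mirrors `scheduling_lookahead` above; anything lower than 2 degrades parachain block times.
+//! const SCHEDULING_LOOKAHEAD: u32 = 2;
+//! ```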
+//! +//! ## Phase 1 - Update Parachain Runtime +//! +//! This phase involves configuring your parachainโ€™s runtime `/runtime/src/lib.rs` to make use of +//! async backing system. +//! +//! 1. Establish and ensure constants for `capacity` and `velocity` are both set to 1 in the +//! runtime. +//! 2. Establish and ensure the constant relay chain slot duration measured in milliseconds equal to +//! `6000` in the runtime. +//! ```rust +//! // Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the +//! // relay chain. +//! pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +//! // How many parachain blocks are processed by the relay chain per parent. Limits the number of +//! // blocks authored per slot. +//! pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; +//! // Relay chain slot duration, in milliseconds. +//! pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +//! ``` +//! +//! 3. Establish constants `MILLISECS_PER_BLOCK` and `SLOT_DURATION` if not already present in the +//! runtime. +//! ```ignore +//! // `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +//! // up by `pallet_aura` to implement `fn slot_duration()`. +//! // +//! // Change this to adjust the block time. +//! pub const MILLISECS_PER_BLOCK: u64 = 12000; +//! pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +//! ``` +//! +//! 4. Configure `cumulus_pallet_parachain_system` in the runtime. +//! +//! - Define a `FixedVelocityConsensusHook` using our capacity, velocity, and relay slot duration +//! constants. Use this to set the parachain system `ConsensusHook` property. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", ConsensusHook)] +//! ```ignore +//! impl cumulus_pallet_parachain_system::Config for Runtime { +//! .. +//! type ConsensusHook = ConsensusHook; +//! .. +//! } +//! ``` +//! - Set the parachain system property `CheckAssociatedRelayNumber` to +//! `RelayNumberMonotonicallyIncreases` +//! ```ignore +//! impl cumulus_pallet_parachain_system::Config for Runtime { +//! .. +//! type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; +//! .. +//! } +//! ``` +//! +//! 5. Configure `pallet_aura` in the runtime. +//! +//! - Set `AllowMultipleBlocksPerSlot` to `false` (don't worry, we will set it to `true` when we +//! activate async backing in phase 3). +//! +//! - Define `pallet_aura::SlotDuration` using our constant `SLOT_DURATION` +//! ```ignore +//! impl pallet_aura::Config for Runtime { +//! .. +//! type AllowMultipleBlocksPerSlot = ConstBool; +//! #[cfg(feature = "experimental")] +//! type SlotDuration = ConstU64; +//! .. +//! } +//! ``` +//! +//! 6. Update `sp_consensus_aura::AuraApi::slot_duration` in `sp_api::impl_runtime_apis` to match +//! the constant `SLOT_DURATION` +#![doc = docify::embed!("../../templates/parachain/runtime/src/apis.rs", impl_slot_duration)] +//! +//! 7. Implement the `AuraUnincludedSegmentApi`, which allows the collator client to query its +//! runtime to determine whether it should author a block. +//! +//! - Add the dependency `cumulus-primitives-aura` to the `runtime/Cargo.toml` file for your +//! runtime +//! ```ignore +//! .. +//! cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } +//! .. +//! ``` +//! +//! - In the same file, add `"cumulus-primitives-aura/std",` to the `std` feature. +//! +//! - Inside the `impl_runtime_apis!` block for your runtime, implement the +//! `cumulus_primitives_aura::AuraUnincludedSegmentApi` as shown below. 
+#![doc = docify::embed!("../../templates/parachain/runtime/src/apis.rs", impl_can_build_upon)] +//! +//! **Note:** With a capacity of 1 we have an effective velocity of ยฝ even when velocity is +//! configured to some larger value. This is because capacity will be filled after a single block is +//! produced and will only be freed up after that block is included on the relay chain, which takes +//! 2 relay blocks to accomplish. Thus with capacity 1 and velocity 1 we get the customary 12 second +//! parachain block time. +//! +//! 8. If your `runtime/src/lib.rs` provides a `CheckInherents` type to `register_validate_block`, +//! remove it. `FixedVelocityConsensusHook` makes it unnecessary. The following example shows how +//! `register_validate_block` should look after removing `CheckInherents`. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", register_validate_block)] +//! +//! +//! ## Phase 2 - Update Parachain Nodes +//! +//! This phase consists of plugging in the new lookahead collator node. +//! +//! 1. Import `cumulus_primitives_core::ValidationCode` to `node/src/service.rs`. +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", cumulus_primitives)] +//! +//! 2. In `node/src/service.rs`, modify `sc_service::spawn_tasks` to use a clone of `Backend` rather +//! than the original +//! ```ignore +//! sc_service::spawn_tasks(sc_service::SpawnTasksParams { +//! .. +//! backend: backend.clone(), +//! .. +//! })?; +//! ``` +//! +//! 3. Add `backend` as a parameter to `start_consensus()` in `node/src/service.rs` +//! ```text +//! fn start_consensus( +//! .. +//! backend: Arc, +//! .. +//! ``` +//! ```ignore +//! if validator { +//! start_consensus( +//! .. +//! backend.clone(), +//! .. +//! )?; +//! } +//! ``` +//! +//! 4. In `node/src/service.rs` import the lookahead collator rather than the basic collator +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", lookahead_collator)] +//! +//! 5. In `start_consensus()` replace the `BasicAuraParams` struct with `AuraParams` +//! - Change the struct type from `BasicAuraParams` to `AuraParams` +//! - In the `para_client` field, pass in a cloned para client rather than the original +//! - Add a `para_backend` parameter after `para_client`, passing in our para backend +//! - Provide a `code_hash_provider` closure like that shown below +//! - Increase `authoring_duration` from 500 milliseconds to 1500 +//! ```ignore +//! let params = AuraParams { +//! .. +//! para_client: client.clone(), +//! para_backend: backend.clone(), +//! .. +//! code_hash_provider: move |block_hash| { +//! client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) +//! }, +//! .. +//! authoring_duration: Duration::from_millis(1500), +//! .. +//! }; +//! ``` +//! +//! **Note:** Set `authoring_duration` to whatever you want, taking your own hardware into account. +//! But if the backer who should be slower than you due to reading from disk, times out at two +//! seconds your candidates will be rejected. +//! +//! 6. In `start_consensus()` replace `basic_aura::run` with `aura::run` +//! ```ignore +//! let fut = +//! aura::run::( +//! params, +//! ); +//! task_manager.spawn_essential_handle().spawn("aura", None, fut); +//! ``` +//! +//! ## Phase 3 - Activate Async Backing +//! +//! This phase consists of changes to your parachainโ€™s runtime that activate async backing feature. +//! +//! 1. Configure `pallet_aura`, setting `AllowMultipleBlocksPerSlot` to true in +//! `runtime/src/lib.rs`. 
+#![doc = docify::embed!("../../templates/parachain/runtime/src/configs/mod.rs", aura_config)] +//! +//! 2. Increase the maximum `UNINCLUDED_SEGMENT_CAPACITY` in `runtime/src/lib.rs`. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", async_backing_params)] +//! +//! 3. Decrease `MILLISECS_PER_BLOCK` to 6000. +//! +//! - Note: For a parachain which measures time in terms of its own block number rather than by +//! relay block number it may be preferable to increase velocity. Changing block time may cause +//! complications, requiring additional changes. See the section โ€œTiming by Block Numberโ€. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", block_times)] +//! +//! 4. Update `MAXIMUM_BLOCK_WEIGHT` to reflect the increased time available for block production. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", max_block_weight)] +//! +//! 5. Add a feature flagged alternative for `MinimumPeriod` in `pallet_timestamp`. The type should +//! be `ConstU64<0>` with the feature flag experimental, and `ConstU64<{SLOT_DURATION / 2}>` +//! without. +//! ```ignore +//! impl pallet_timestamp::Config for Runtime { +//! .. +//! #[cfg(feature = "experimental")] +//! type MinimumPeriod = ConstU64<0>; +//! #[cfg(not(feature = "experimental"))] +//! type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; +//! .. +//! } +//! ``` +//! +//! ## Timing by Block Number +//! +//! With asynchronous backing it will be possible for parachains to opt for a block time of 6 +//! seconds rather than 12 seconds. But modifying block duration isnโ€™t so simple for a parachain +//! which was measuring time in terms of its own block number. It could result in expected and +//! actual time not matching up, stalling the parachain. +//! +//! One strategy to deal with this issue is to instead rely on relay chain block numbers for timing. +//! Relay block number is kept track of by each parachain in `pallet-parachain-system` with the +//! storage value `LastRelayChainBlockNumber`. This value can be obtained and used wherever timing +//! based on block number is needed. + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index f5f6d2b5e0c0768f1cff2f759769598c4703c601..8296ed447e143d3fadcfbb2157786eea8934947f 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -1,7 +1,16 @@ //! # Polkadot SDK Docs Guides //! -//! This crate contains a collection of guides that are foundational to the developers of -//! Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! This crate contains a collection of guides that are foundational to the developers of Polkadot +//! SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! +//! 1. [`crate::guides::your_first_pallet`] is your starting point with Polkadot SDK. It contains +//! the basics of +//! building a simple crypto currency with FRAME. +//! 2. [`crate::guides::your_first_runtime`] is the next step in your journey. It contains the +//! basics of building a runtime that contains this pallet, plus a few common pallets from FRAME. +//! +//! +//! Other guides are related to other miscellaneous topics and are listed as modules below. /// Write your first simple pallet, learning the most most basic features of FRAME along the way. 
pub mod your_first_pallet; @@ -11,21 +20,24 @@ pub mod your_first_pallet; pub mod your_first_runtime; /// Running the given runtime with a node. No specific consensus mechanism is used at this stage. -pub mod your_first_node; - -/// How to change the consensus engine of both the node and the runtime. -pub mod changing_consensus; +// TODO +// pub mod your_first_node; /// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain and connect /// it to a relay-chain. -pub mod cumulus_enabled_parachain; +// TODO +// pub mod cumulus_enabled_parachain; /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself /// and the relay chain to which it is connected. -pub mod xcm_enabled_parachain; +// TODO +// pub mod xcm_enabled_parachain; /// How to enable storage weight reclaiming in a parachain node and runtime. pub mod enable_pov_reclaim; +/// How to enable Async Backing on parachain projects that started in 2023 or before. +pub mod async_backing_guide; + /// How to enable metadata hash verification in the runtime. pub mod enable_metadata_hash; diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index c6e0dd0edf8925105661591d0831d65ff8ed02be..da4624f5ac2b85dcf884798fefca42da229760e0 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -14,18 +14,14 @@ //! > FRAME-based runtimes use various techniques to re-use a currency pallet instead of writing //! > one. Further advanced FRAME related topics are discussed in [`crate::reference_docs`]. //! -//! ## Topics Covered +//! ## Writing Your First Pallet //! -//! The following FRAME topics are covered in this guide: +//! To get started, use one of the templates mentioned in [`crate::polkadot_sdk::templates`]. We +//! recommend using the `polkadot-sdk-minimal-template`. You might need to change small parts of +//! this guide, namely the crate/package names, based on which tutorial you use. //! -//! - [Storage](frame::pallet_macros::storage) -//! - [Call](frame::pallet_macros::call) -//! - [Event](frame::pallet_macros::event) -//! - [Error](frame::pallet_macros::error) -//! - Basics of testing a pallet -//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) -//! -//! ## Writing Your First Pallet +//! > Be aware that you can read the entire source code backing this tutorial by clicking on the +//! > [`source`](./mod.rs.html) button at the top right of the page. //! //! You should have studied the following modules as a prelude to this guide: //! @@ -33,16 +29,28 @@ //! - [`crate::reference_docs::trait_based_programming`] //! - [`crate::polkadot_sdk::frame_runtime`] //! +//! ## Topics Covered +//! +//! The following FRAME topics are covered in this guide: +//! +//! - [`pallet::storage`] +//! - [`pallet::call`] +//! - [`pallet::event`] +//! - [`pallet::error`] +//! - Basics of testing a pallet +//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) +//! //! ### Shell Pallet //! //! Consider the following as a "shell pallet". We continue building the rest of this pallet based //! on this template. //! -//! [`pallet::config`](frame::pallet_macros::config) and -//! [`pallet::pallet`](frame::pallet_macros::pallet) are both mandatory parts of any pallet. Refer -//! to the documentation of each to get an overview of what they do. +//! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any pallet. Refer to the +//! 
documentation of each to get an overview of what they do. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] //! +//! All of the code that follows in this guide should live inside of the `mod pallet`. +//! //! ### Storage //! //! First, we will need to create two onchain storage declarations. @@ -55,15 +63,14 @@ //! > generic bounded type in the `Config` trait, and then specify it in the implementation. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] //! -//! The definition of these two storage items, based on [`frame::pallet_macros::storage`] details, -//! is as follows: +//! The definition of these two storage items, based on [`pallet::storage`] details, is as follows: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] //! //! ### Dispatchables //! -//! Next, we will define the dispatchable functions. As per [`frame::pallet_macros::call`], these -//! will be defined as normal `fn`s attached to `struct Pallet`. +//! Next, we will define the dispatchable functions. As per [`pallet::call`], these will be defined +//! as normal `fn`s attached to `struct Pallet`. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] //! //! The logic of the functions is self-explanatory. Instead, we will focus on the FRAME-related @@ -79,7 +86,6 @@ //! was signed by `who`. #![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", ensure_signed)] //! -//! //! - Where does `mutate`, `get` and `insert` and other storage APIs come from? All of them are //! explained in the corresponding `type`, for example, for `Balances::::insert`, you can look //! into [`frame::prelude::StorageMap::insert`]. @@ -95,8 +101,7 @@ //! //! - Why are all `get` and `mutate` functions returning an `Option`? This is the default behavior //! of FRAME storage APIs. You can learn more about how to override this by looking into -//! [`frame::pallet_macros::storage`], and -//! [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] +//! [`pallet::storage`], and [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] //! //! ### Improving Errors //! @@ -116,6 +121,25 @@ //! //! ### Your First (Test) Runtime //! +//! The typical testing code of a pallet lives in a module that imports some preludes useful for +//! testing, similar to: +//! +//! ``` +//! pub mod pallet { +//! // snip -- actually pallet code. +//! } +//! +//! #[cfg(test)] +//! mod tests { +//! // bring in the testing prelude of frame +//! use frame::testing_prelude::*; +//! // bring in all pallet items +//! use super::pallet::*; +//! +//! // snip -- rest of the testing code. +//! } +//! ``` +//! //! Next, we create a "test runtime" in order to test our pallet. Recall from //! [`crate::polkadot_sdk::frame_runtime`] that a runtime is a collection of pallets, expressed //! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include @@ -166,7 +190,6 @@ //! As noted above, the `T::AccountId` is now `u64`. Moreover, `Runtime` is replacing ``. //! This is why for example you see `Balances::::get(..)`. Finally, notice that the //! dispatchables are simply functions that can be called on top of the `Pallet` struct. -// TODO: hard to explain exactly `RuntimeOrigin::signed(ALICE)` at this point. //! //! Congratulations! You have written your first pallet and tested it! Next, we learn a few optional //! steps to improve our pallet. @@ -236,8 +259,7 @@ //! 
by one character. FRAME errors are exactly a solution to maintain readability, whilst fixing //! the drawbacks mentioned. In short, we use an enum to represent different variants of our //! error. These variants are then mapped in an efficient way (using only `u8` indices) to -//! [`sp_runtime::DispatchError::Module`]. Read more about this in -//! [`frame::pallet_macros::error`]. +//! [`sp_runtime::DispatchError::Module`]. Read more about this in [`pallet::error`]. //! //! - **Event**: Events are akin to the return type of dispatchables. They are mostly data blobs //! emitted by the runtime to let outside world know what is happening inside the pallet. Since @@ -246,20 +268,16 @@ //! use passive tense for event names (eg. `SomethingHappened`). This allows other sub-systems or //! external parties (eg. a light-node, a DApp) to listen to particular events happening, without //! needing to re-execute the whole state transition function. -// TODO: both need to be improved a lot at the pallet-macro rust-doc level. Also my explanation -// of event is probably not the best. //! //! With the explanation out of the way, let's see how these components can be added. Both follow a -//! fairly familiar syntax: normal Rust enums, with extra -//! [`#[frame::event]`](frame::pallet_macros::event) and -//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached. +//! fairly familiar syntax: normal Rust enums, with extra [`pallet::event`] and [`pallet::error`] +//! attributes attached. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] //! -//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn -//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too -//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two -//! things: +//! One slightly custom part of this is the [`pallet::generate_deposit`] part. Without going into +//! too much detail, in order for a pallet to emit events to the rest of the system, it needs to do +//! two things: //! //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In //! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: @@ -268,8 +286,8 @@ //! store it where needed. //! //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME -//! provides a default way of storing events, and this is what -//! [`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing. +//! provides a default way of storing events, and this is what [`pallet::generate_deposit`] is +//! doing. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in @@ -297,10 +315,17 @@ //! - [`crate::reference_docs::defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. //! - [`crate::reference_docs::frame_runtime_types`]. -//! - The pallet we wrote in this guide was using `dev_mode`, learn more in -//! [`frame::pallet_macros::config`]. +//! - The pallet we wrote in this guide was using `dev_mode`, learn more in [`pallet::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in //! [`frame::pallet_macros`]. +//! +//! [`pallet::storage`]: ../../../frame_support/pallet_macros/attr.storage.html +//!
[`pallet::call`]: ../../../frame_support/pallet_macros/attr.call.html +//! [`pallet::event`]: ../../../frame_support/pallet_macros/attr.event.html +//! [`pallet::error`]: ../../../frame_support/pallet_macros/attr.error.html +//! [`pallet::pallet`]: ../../../frame_support/pallet_macros/attr.pallet.html +//! [`pallet::config`]: ../../../frame_support/pallet_macros/attr.config.html +//! [`pallet::generate_deposit`]: ../../../frame_support/pallet_macros/attr.generate_deposit.html #[docify::export] #[frame::pallet(dev_mode)] @@ -418,16 +443,22 @@ pub mod pallet { #[cfg(any(test, doc))] pub(crate) mod tests { use crate::guides::your_first_pallet::pallet::*; + + #[docify::export(testing_prelude)] use frame::testing_prelude::*; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; + + pub(crate) const ALICE: u64 = 1; + pub(crate) const BOB: u64 = 2; + pub(crate) const CHARLIE: u64 = 3; #[docify::export] + // This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod + // tests { .. }` mod runtime { use super::*; // we need to reference our `mod pallet` as an identifier to pass to // `construct_runtime`. + // YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE use crate::guides::your_first_pallet::pallet as pallet_currency; construct_runtime!( @@ -595,7 +626,7 @@ pub mod pallet { #[test] fn transfer_works() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: @@ -617,7 +648,7 @@ pub mod pallet { #[test] fn transfer_from_non_existent_fails() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_err!( Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), "NonExistentAccount" @@ -738,7 +769,7 @@ pub mod pallet_v2 { // the final assertion. System::set_block_number(ALICE); - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs index 3e02ef1b1b28eedb7cea0ce38fb8b372a0caaf60..c58abc1120c13f5d9d724f7dd78464380f8840bf 100644 --- a/docs/sdk/src/guides/your_first_runtime.rs +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -1 +1,3 @@ //! # Your first Runtime +//! +//! ๐Ÿšง diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs index f9b8a381365c4b3675485f3f32cb0cf751138275..39255c8f51ad6e589c22c7a29f08211b50053203 100644 --- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -87,8 +87,6 @@ //! * writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template). //! * writing a runtime in AssemblyScript,as explored in [this project](https://github.com/LimeChain/subsembly). -use frame::prelude::*; - /// A FRAME based pallet. This `mod` is the entry point for everything else. All /// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an /// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for @@ -96,7 +94,7 @@ use frame::prelude::*; #[docify::export] #[frame::pallet(dev_mode)] pub mod pallet { - use super::*; + use frame::prelude::*; /// The configuration trait of a pallet. Mandatory. 
Allows a pallet to receive types at a /// later point from the runtime that wishes to contain it. It allows the pallet to be diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index 4bf0e839c798fc55dd1fa0696c6bfe8411c32bc8..e87eb9c2bc8abe72538f4bc88a8f6348e1e90343 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -1,19 +1,33 @@ //! # Templates //! -//! ### Internal +//! This document enumerates a non-exhaustive list of templates that one can use to get started with +//! polkadot-sdk. //! -//! The following templates are maintained as a part of the `polkadot-sdk` repository: +//! > Know more tools/templates that are not listed here? please contribute them by opening a PR. //! -//! - classic [`substrate-node-template`]: is a white-labeled substrate-based blockchain with a -//! moderate amount of features. It can act as a great starting point for those who want to learn -//! Substrate/FRAME and want to have a template that is already doing something. -//! - [`substrate-minimal-template`]: Same as the above, but it contains the least amount of code in -//! both the node and runtime. It is a great starting point for those who want to deeply learn -//! Substrate and FRAME. -//! - classic [`cumulus-parachain-template`], which is the de-facto parachain template shipped with -//! Cumulus. It is the parachain-enabled version of [`substrate-node-template`]. +//! ## Internal //! -//! ### External Templates +//! The following [templates](https://github.com/paritytech/polkadot-sdk/blob/master/templates) are +//! maintained as a part of the `polkadot-sdk` repository: +//! +//! - `minimal_template_node`/[`minimal_template_runtime`]: A minimal template that contains the +//! least amount of features to be a functioning blockchain. Suitable for learning, development +//! and testing. This template is not meant to be used in production. +//! - `solochain_template_node`/[`solochain_template_runtime`]: Formerly known as +//! "substrate-node-template", is a white-labeled substrate-based blockchain (aka. solochain) that +//! contains moderate features, such as a basic consensus engine and some FRAME pallets. This +//! template can act as a good starting point for those who want to launch a solochain. +//! - `parachain_template_node`/[`parachain_template_runtime`]: A parachain template ready to be +//! connected to a test relay-chain. +//! +//! These templates are always kept up to date, and are mirrored to external repositories for easy +//! forking: +//! +//! - +//! - +//! - +//! +//! ## External Templates //! //! Noteworthy templates outside of this repository. //! @@ -22,23 +36,17 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ -//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ - -// TODO: in general, we need to make a deliberate choice here of moving a few key templates to this -// repo (nothing stays in `substrate-developer-hub`) and the everything else should be community -// maintained. https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// TODO: we should rename `substrate-node-template` to `substrate-basic-template`, -// `substrate-blockchain-template`. `node` is confusing in the name. 
-// `substrate-blockchain-template` and `cumulus-parachain-template` go well together ๐Ÿค. https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// NOTE: a super important detail that I am looking forward to here is -// and -// . Meaning that I would not spend time on -// teaching someone too much detail about the ugly thing we call "node" nowadays. In the future, I -// am sure we will either have a better "node-builder" code that can actually be tested, or an -// "omni-node" that can run (almost) any wasm file. We should already build tutorials in this -// direction IMO. This also affects all the templates. If we have a good neat runtime file, which we -// are moving toward, and a good node-builder, we don't need all of these damn templates. These -// templates are only there because the boilerplate is super horrible atm. +//! ## OpenZeppelin +//! +//! In June 2023, OpenZeppelin was awarded a grant from the [Polkadot +//! treasury](https://polkadot.polkassembly.io/treasury/406) for building a number of Polkadot-sdk +//! based templates. These templates are expected to be a great starting point for developers. +//! +//! - +//! +//! ## POP-CLI +//! +//! Is a CLI tool capable of scaffolding a new polkadot-sdk-based project, possibly removing the +//! need for templates. +//! +//! - diff --git a/docs/sdk/src/reference_docs/blockchain_scalibility.rs b/docs/sdk/src/reference_docs/blockchain_scalibility.rs deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index c6dd3af9d90be8039cf2aabbdf04964ff5dc886d..9cf921a492a515ee424f0e47674a9aef2414b034 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -10,44 +10,44 @@ edition.workspace = true publish = false [dependencies] -docify = "0.2.8" -parity-scale-codec = { version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } -serde = { workspace = true, default-features = false } +docify = { workspace = true } +codec = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { package = "polkadot-sdk-frame", path = "../../../../../substrate/frame", default-features = false, features = [ +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } # pallets that we want to use -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-keyring = { path = "../../../../../substrate/primitives/keyring", default-features = false } -sp-application-crypto = { path = "../../../../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } +sp-genesis-builder = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-keyring = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] -chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../../../../substrate/bin/utils/chain-spec-builder" } -sc-chain-spec = { path = "../../../../../substrate/client/chain-spec" } +chain-spec-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } [features] default = ["std"] std = [ - "parity-scale-codec/std", + "codec/std", "scale-info/std", "frame/std", diff --git a/docs/sdk/src/reference_docs/consensus_swapping.rs b/docs/sdk/src/reference_docs/consensus_swapping.rs deleted file mode 100644 index e639761ee97b42fa68e1ba77250d490e28277e24..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/consensus_swapping.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Consensus Swapping -//! -//! Notes: -//! -//! - The typical workshop done by Joshy in some places where he swaps out the consensus to be PoW. -//! - This could also be a tutorial rather than a ref doc, depending on the size. 
diff --git a/docs/sdk/src/reference_docs/custom_host_functions.rs b/docs/sdk/src/reference_docs/custom_host_functions.rs new file mode 100644 index 0000000000000000000000000000000000000000..719b208a2bff7ed6fb1929cbdccae7dddda2ab00 --- /dev/null +++ b/docs/sdk/src/reference_docs/custom_host_functions.rs @@ -0,0 +1,27 @@ +//! # Custom Host Functions +//! +//! Host functions are functions that the wasm instance can use to communicate with the node. Learn +//! more about this in [`crate::reference_docs::wasm_meta_protocol`]. +//! +//! ## Finding Host Functions +//! +//! To declare a set of functions as host functions, you need to use the `#[runtime_interface]` +//! ([`sp_runtime_interface`]) attribute macro. The most notable host functions are those +//! that allow the runtime to access the chain state, namely [`sp_io::storage`]. Some other notable +//! host functions are also defined in [`sp_io`]. +//! +//! ## Adding New Host Functions +//! +//! > Adding a new host function is a big commitment and should be done with care. Namely, the nodes +//! > in the network need to support all host functions forever in order to be able to sync +//! > historical blocks. +//! +//! Adding host functions is only possible when you are using a node-template, so that you have +//! access to the boilerplate of building your node. +//! +//! A group of host functions can always be grouped together as a tuple: +#![doc = docify::embed!("../../substrate/primitives/io/src/lib.rs", SubstrateHostFunctions)] +//! +//! The host functions are attached to the node side's [`sc_executor::WasmExecutor`]. For example, in +//! the minimal template, the setup looks as follows: +#![doc = docify::embed!("../../templates/minimal/node/src/service.rs", FullClient)]
diff --git a/docs/sdk/src/reference_docs/fee_less_runtime.rs b/docs/sdk/src/reference_docs/fee_less_runtime.rs index 1213c26282537fdee2d04ebd452289f0e6bc759f..9146b30ec5774355ce91dd02a5bb010dcf6fbada 100644 --- a/docs/sdk/src/reference_docs/fee_less_runtime.rs +++ b/docs/sdk/src/reference_docs/fee_less_runtime.rs @@ -1,5 +1,6 @@ //! # Fee-Less Runtime //! +//! 🚧 Work In Progress 🚧 //! //! Notes: //!
diff --git a/docs/sdk/src/reference_docs/frame_logging.rs b/docs/sdk/src/reference_docs/frame_logging.rs new file mode 100644 index 0000000000000000000000000000000000000000..301fa7ef83f827c3115a6a30bb8d1ad35b5bfd83 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_logging.rs @@ -0,0 +1,116 @@ +//! # FRAME Logging +//! +//! This reference doc briefly explores how to do logging and printing in runtimes, mainly +//! FRAME-based ones. +//! +//! ## Using `println!` +//! +//! To recap, as with standard Rust, you can use `println!` _in your tests_, but it will only print +//! out if executed with `--nocapture`, or if the test panics. +//! +//! ``` +//! fn it_print() { +//! println!("Hello, world!"); +//! } +//! ``` +//! +//! Within the pallet, if you want to use the standard `println!`, it needs to be wrapped in +//! [`sp_std::if_std`]. Of course, this means that this print code is only available to you when +//! compiling with the `std` feature, and never present in a wasm build. +//! +//! ``` +//! // somewhere in your pallet. This is not real pallet code. +//! mod pallet { +//! struct Pallet; +//! impl Pallet { +//! fn print() { +//! sp_std::if_std! { +//! println!("Hello, world!"); +//! } +//! } +//! } +//! } +//! ``` +//! +//! ## Using `log` +//! +//! First, ensure you are familiar with the `log` crate. In short, each log statement has: +//! +//! 1. `log-level`, signifying how important it is +//! 2. `log-target`, signifying to which component it belongs. +//! +//! To add log statements to your pallet, first add the `log` crate to the `Cargo.toml` of the +//! pallet: +//! +//! ```text +//! [dependencies] +//! log = { version = "x.y.z", default-features = false } +//! +//! [features] +//! std = [ +//! // snip -- other pallets +//! "log/std" +//! ] +//! ``` +//! +//! More conveniently, the `frame` umbrella crate re-exports the log crate as [`frame::log`]. +//! +//! Then, the pallet can use this crate to emit log statements. In this statement, we use the info +//! level, and the target is `pallet-example`. +//! +//! ``` +//! mod pallet { +//! struct Pallet; +//! +//! impl Pallet { +//! fn logs() { +//! frame::log::info!(target: "pallet-example", "Hello, world!"); +//! } +//! } +//! } +//! ``` +//! +//! This will in itself just emit the log messages, **but unless captured by a logger, they will +//! not go anywhere**. [`sp_api`] provides a handy function to enable the runtime logging: +//! +//! ``` +//! // in your test +//! fn it_also_prints() { +//! sp_api::init_runtime_logger(); +//! // call into your pallet, and now it will print `log` statements. +//! } +//! ``` +//! +//! Alternatively, you can use [`sp_tracing::try_init_simple`]. +//! +//! `info`, `error` and `warn` logs are printed by default, but if you want lower level logs to also +//! be printed, you must add the following environment variable when running your tests: +//! +//! ```text +//! RUST_LOG=pallet-example=trace cargo test +//! ``` +//! +//! ## Enabling Logs in Production +//! +//! All logs from the runtime are emitted by default, but there is a feature flag in [`sp_api`], +//! called `disable-logging`, that can be used to disable all logs in the runtime. This is useful +//! for production chains to reduce the size and overhead of the wasm runtime. +#![doc = docify::embed!("../../substrate/primitives/api/src/lib.rs", init_runtime_logger)] +//! +//! Similar to the above, the proper `RUST_LOG` value must also be set in the environment when +//! compiling the runtime. +//! +//! ## Log Target Prefixing +//! +//! Many [`crate::polkadot_sdk::frame_runtime`] pallets emit logs with log target `runtime::<pallet-name>`, for example `runtime::system`. This then allows one to run a node with a wasm blob +//! compiled with `LOG_TARGET=runtime=debug`, which enables the log target of all pallets whose log +//! target starts with `runtime`. +//! +//! ## Low Level Primitives +//! +//! Under the hood, logging is another instance of host functions (as defined in +//! [`crate::reference_docs::wasm_meta_protocol`]). The runtime uses a set of host functions under +//! [`sp_io::logging`] and [`sp_io::misc`] to emit all logs and prints. You typically do not need to +//! use these APIs directly.
diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs index 7999707e5ee018c4bb7634e7a506ff8fee8fa8ac..1ec9212e2306613b72ec4c6f433a68c92f5bb0d5 100644 --- a/docs/sdk/src/reference_docs/frame_offchain_workers.rs +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -58,7 +58,6 @@ //! [`frame::pallet_macros::hooks`]. //! //! ``` -//! //! #[frame::pallet] //! pub mod pallet { //!
use frame::prelude::*; diff --git a/docs/sdk/src/reference_docs/frame_storage_derives.rs b/docs/sdk/src/reference_docs/frame_storage_derives.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d9edef398a072839f82c21be76b74da6cf20ff2 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_storage_derives.rs @@ -0,0 +1,199 @@ +//!
+//! In all examples, a few lines of boilerplate have been hidden from each snippet for conciseness. +//!
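+//!
+//! For orientation, the boilerplate hidden behind the `#`-prefixed lines of each snippet is
+//! roughly the following shell pallet; this is a readability sketch, and the exact hidden lines
+//! can be seen by expanding the snippets themselves:
+//!
+//! ```ignore
+//! #[frame::pallet]
+//! pub mod pallet {
+//!     use frame::prelude::*;
+//!
+//!     // A config trait and the pallet struct are the mandatory minimum of any pallet.
+//!     #[pallet::config]
+//!     pub trait Config: frame_system::Config {}
+//!
+//!     #[pallet::pallet]
+//!     pub struct Pallet<T>(_);
+//!
+//!     // ...the storage items under discussion are declared here.
+//! }
+//! ```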
+//! +//! Let's begin by starting to store a `NewType` in a storage item: +//! +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! pub struct NewType(u32); +// +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! This raises a number of compiler errors, like: +//! ```text +//! the trait `MaxEncodedLen` is not implemented for `NewType`, which is required by +//! `frame::prelude::StorageValue<_GeneratedPrefixForStorageSomething, NewType>: +//! StorageInfoTrait` +//! ``` +//! +//! This implies the following set of traits that need to be derived for a type to be stored in +//! `frame` storage: +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! pub struct NewType(u32); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Next, let's look at how this will differ if we are to store a type that is derived from `T` in +//! storage, such as [`frame::prelude::BlockNumberFor`]: +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Surprisingly, this will also raise a number of errors, like: +//! ```text +//! the trait `TypeInfo` is not implemented for `T`, which is required +//! by`frame_support::pallet_prelude::StorageValue, +//! pallet_2::NewType>:StorageEntryMetadataBuilder +//! ``` +//! +//! Why is that? The underlying reason is that the `TypeInfo` `derive` macro will only work for +//! `NewType` if all of `NewType`'s generics also implement `TypeInfo`. This is not the case for `T` +//! in the example above. +//! +//! If you expand an instance of the derive, you will find something along the lines of: +//! `impl TypeInfo for NewType where T: TypeInfo { ... }`. This is the reason why the +//! `TypeInfo` trait is required for `T`. +//! +//! To fix this, we need to add a `#[scale_info(skip_type_params(T))]` +//! attribute to `NewType`. This additional macro will instruct the `derive` to skip the bound on +//! `T`. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Next, let's say we wish to store `NewType` as [`frame::prelude::ValueQuery`], which means it +//! must also implement `Default`. This should be as simple as adding `derive(Default)` to it, +//! right? +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! 
# use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo, Default)] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType, ValueQuery>; +//! } +//! ``` +//! +//! Under the hood, the expansion of the `derive(Default)` will suffer from the same restriction as +//! before: it will only work if `T: Default`, and `T` is not `Default`. Note that this is an +//! expected issue: `T` is merely a wrapper of many other types, such as `BlockNumberFor`. +//! `BlockNumberFor` should indeed implement `Default`, but `T` implementing `Default` is rather +//! meaningless. +//! +//! To fix this, frame provides a set of macros that are analogous to normal rust derive macros, but +//! work nicely on top of structs that are generic over `T: Config`. These macros are: +//! +//! - [`frame::prelude::DefaultNoBound`] +//! - [`frame::prelude::DebugNoBound`] +//! - [`frame::prelude::PartialEqNoBound`] +//! - [`frame::prelude::EqNoBound`] +//! - [`frame::prelude::CloneNoBound`] +//! - [`frame::prelude::PartialOrdNoBound`] +//! - [`frame::prelude::OrdNoBound`] +//! +//! The above traits are almost certainly needed for your tests: To print your type, assert equality +//! or clone it. +//! +//! We can fix the following example by using [`frame::prelude::DefaultNoBound`]. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive( +//! codec::Encode, +//! codec::Decode, +//! codec::MaxEncodedLen, +//! scale_info::TypeInfo, +//! DefaultNoBound +//! )] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType, ValueQuery>; +//! } +//! ``` +//! +//! Finally, if a custom type that is provided through `Config` is to be stored in the storage, it +//! is subject to the same trait requirements. The following does not work: +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! #[pallet::config] +//! pub trait Config: frame_system::Config { +//! type CustomType; +//! } +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! #[pallet::storage] +//! pub type Something = StorageValue<_, T::CustomType>; +//! } +//! ``` +//! +//! But adding the right trait bounds will fix it. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! #[pallet::config] +//! pub trait Config: frame_system::Config { +//! type CustomType: codec::FullCodec +//! + codec::MaxEncodedLen +//! + scale_info::TypeInfo +//! + Debug +//! + Default; +//! } +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! #[pallet::storage] +//! pub type Something = StorageValue<_, T::CustomType>; +//! } +//! ``` diff --git a/docs/sdk/src/reference_docs/frame_system_accounts.rs b/docs/sdk/src/reference_docs/frame_system_accounts.rs index ae9d2c9e0cb3ca7a694d9e5330363c05f6f78cb0..523fe704308497d3116d36cfca086a617b3a9d3b 100644 --- a/docs/sdk/src/reference_docs/frame_system_accounts.rs +++ b/docs/sdk/src/reference_docs/frame_system_accounts.rs @@ -1,5 +1,7 @@ //! # FRAME Accounts //! +//! //! ๐Ÿšง Work In Progress ๐Ÿšง +//! //! 
How `frame_system` handles accountIds. Nonce. Consumers and Providers, reference counting. // - poorly understood topics, needs one great article to rule them all.
diff --git a/docs/sdk/src/reference_docs/light_nodes.rs b/docs/sdk/src/reference_docs/light_nodes.rs deleted file mode 100644 index d6670bf03ab1a8e36ef1d8b80717d1b3833daff5..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/light_nodes.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # Light Clients -//! -//! -//! Notes: should contain only high level information about light clients, then link to how to set -//! it up in PAPI and SubXT -//! -//!
diff --git a/docs/sdk/src/reference_docs/metadata.rs b/docs/sdk/src/reference_docs/metadata.rs index 702c1c30fd9cf2d09082b39fecf880065ebd5375..96f92ac0c412ba252092170e0788680300385901 100644 --- a/docs/sdk/src/reference_docs/metadata.rs +++ b/docs/sdk/src/reference_docs/metadata.rs @@ -1 +1,25 @@ //! # Metadata +//! +//! The existence of metadata in polkadot-sdk goes back to the (forkless) upgrade-ability of all +//! Substrate-based blockchains, which is achieved through +//! [`crate::reference_docs::wasm_meta_protocol`]. You can learn more about the details of how to +//! deal with these upgrades in [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! +//! Another consequence of upgrade-ability is that as a UI, wallet, or generally an offchain entity, +//! it is hard to know the types internal to the runtime, specifically in light of the fact that +//! they can change at any point in time. +//! +//! This is why all Substrate-based runtimes must expose a [`sp_api::Metadata`] API, which mandates +//! the runtime to return a description of itself. The return type of this API is `Vec<u8>`, meaning +//! that it is up to the runtime developer to decide on the format of this. +//! +//! All [`crate::polkadot_sdk::frame_runtime`] based runtimes expose a specific metadata language, +//! maintained in <https://github.com/paritytech/frame-metadata>, which is adopted in the Polkadot +//! ecosystem. +//! +//! ## Metadata Explorers: +//! +//! A few noteworthy tools that inspect the (FRAME-based) metadata of a chain: +//! +//! +//!
diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 8e0431c48b6f69922fa37a38661175c621c774e4..c69c79365427e25726a95e17913f535a244fe368 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -40,12 +40,15 @@ pub mod runtime_vs_smart_contract; pub mod extrinsic_encoding; /// Learn about the signed extensions that form a part of extrinsics. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; +/// Learn about the details of what derives are needed for a type to be storable in `frame` +/// storage. +pub mod frame_storage_derives; + /// Learn about how to write safe and defensive code in your FRAME runtime. pub mod defensive_programming; @@ -59,9 +62,11 @@ pub mod fee_less_runtime; /// Learn about metadata, the main means through which an upgradeable runtime communicates its /// properties to the outside world. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/47 pub mod metadata; +/// Learn about how to add custom host functions to the node. +pub mod custom_host_functions; + /// Learn about how frame-system handles `account-ids`, nonces, consumers and providers.
pub mod frame_system_accounts; @@ -78,26 +83,12 @@ pub mod frame_tokens; /// Learn about chain specification file and the genesis state of the blockchain. pub mod chain_spec_genesis; -/// Learn about all the memory limitations of the WASM runtime when it comes to memory usage. -// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/52 -pub mod wasm_memory; - /// Learn about Substrate's CLI, and how it can be extended. -// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/53 pub mod cli; -/// Learn about Substrate's consensus algorithms, and how you can switch between two. -// TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 -pub mod consensus_swapping; - /// Learn about Runtime Upgrades and best practices for writing Migrations. pub mod frame_runtime_upgrades_and_migrations; -/// Learn about light nodes, how they function, and how Substrate-based chains come -/// light-node-first out of the box. -// TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 -pub mod light_nodes; - /// Learn about the offchain workers, how they function, and how to use them, as provided by the /// [`frame`] APIs. pub mod frame_offchain_workers; @@ -106,6 +97,9 @@ pub mod frame_offchain_workers; /// together. pub mod frame_pallet_coupling; +/// Learn about how to do logging in FRAME-based runtimes. +pub mod frame_logging; + /// Learn about the Polkadot Umbrella crate that re-exports all other crates. pub mod umbrella_crate; diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs index 43a6bcc14c5d2f67703864c485264583acb19ea3..c644aeaa41650817cd02b0dcc7a97e80c2b19bd0 100644 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -1,7 +1,59 @@ //! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic //! format with custom data that can be checked by the runtime. //! -//! # Example +//! # FRAME provided signed extensions +//! +//! FRAME by default already provides the following signed extensions: +//! +//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same +//! network. Determined based on genesis. +//! +//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable +//! mortality. +//! +//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a +//! transaction is not the *all zero account* (all bytes of the accountid are zero). +//! +//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay +//! of transactions and to provide ordering of transactions. +//! +//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for +//! the currently active runtime. +//! +//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the +//! correct encoding of the call. +//! +//! - [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block +//! before dispatching it. +//! +//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges +//! transaction fees from the signer based on the weight of the call using the native token. +//! +//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction +//! 
fees from the signer based on the weight of the call using any supported asset (including the +//! native token). +//! +//! - [`ChargeAssetTxPayment`(using +//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). The asset is converted to the native token using a pool. +//! +//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions +//! to be processed without paying any fee. This requires that the `call` that should be +//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) +//! attribute. +//! +//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions +//! to include the so-called metadata hash. This is required by chains to support the generic +//! Ledger application and other similar offline wallets. +//! +//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A +//! signed extension for parachains that reclaims unused storage weight after executing a +//! transaction. +//! +//! For more information about these extensions, follow the link to the type documentation. +//! +//! # Building a custom signed extension //! //! Defining a couple of very simple signed extensions looks like the following: #![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs index 9751b0ad5ad6d94a20e027e902ac055fa9e49181..0b3445cfc4bc0ce27b6dcb144618dd7001e6f20a 100644 --- a/docs/sdk/src/reference_docs/umbrella_crate.rs +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -28,8 +28,9 @@ //! `node` feature. For docs.rs the manifest contains specific configuration to make it show up //! all re-exports. //! -//! There is a specific `zepter` check in place to ensure that the features of the umbrella are -//! correctly configured. This check is run in CI and locally when running `zepter`. +//! There is a specific [`zepter`](https://github.com/ggwpez/zepter) check in place to ensure that +//! the features of the umbrella are correctly configured. This check is run in CI and locally when +//! running `zepter`. //! //! ## Generation //! diff --git a/docs/sdk/src/reference_docs/wasm_memory.rs b/docs/sdk/src/reference_docs/wasm_memory.rs deleted file mode 100644 index 4f4cda31094e40e8f308dbb06de4ab0ecd828777..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/wasm_memory.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # WASM Memory Limitations. -//! -//! Notes: -//! -//! - Stack: Need to use `Box<_>` -//! - Heap: Substrate imposes a limit. PvF execution has its own limits -//! - Heap: There is also a maximum amount that a single allocation can have. diff --git a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs index 37d1460f0e1a3737217ac0c80ec41de769db4c1a..0e91e65c55e36d99d6dfbe03e7cda3af09dad942 100644 --- a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs +++ b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs @@ -1,11 +1,13 @@ //! # WASM Meta Protocol //! //! All Substrate based chains adhere to a unique architectural design novel to the Polkadot -//! ecosystem. We refer to this design as the "WASM Meta Protocol". +//! ecosystem. We refer to this design as the "**WASM Meta Protocol**". 
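Stepping back to the FRAME-provided signed extensions listed earlier: a runtime typically wires a selection of them into a tuple (conventionally called `SignedExtra`) that becomes part of its extrinsic type, and the extensions run in the order they appear in that tuple. The following is only a rough sketch following the common runtime-template convention; `Runtime`, `Address`, `RuntimeCall`, and `Signature` are assumed to be defined elsewhere in such a runtime and are not taken from this document.

```rust
// Illustrative wiring only; the exact selection and order of extensions is a
// per-runtime decision.
pub type SignedExtra = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckMortality<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);

// The runtime's extrinsic type then carries this tuple alongside its address,
// call, and signature types.
pub type UncheckedExtrinsic =
    sp_runtime::generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
```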
//! //! Consider the fact that a traditional blockchain software is usually a monolithic artifact. -//! Upgrading any part of the system implies upgrading the entire system. This has historically led -//! to cumbersome forkful upgrades to be the status quo in the blockchain ecosystem. +//! **Upgrading any part of the system implies upgrading the entire system**. This has historically +//! led to cumbersome forkful upgrades to be the status quo in blockchain ecosystems. In other +//! words, the entire node software is the specification of the blockchain's [`state transition +//! function`](crate::reference_docs::blockchain_state_machines). //! //! Moreover, the idea of "storing code in the state" is explored in the context of smart contracts //! platforms, but has not been expanded further. @@ -15,17 +17,16 @@ //! that a smart contract platform stores the code of individual contracts in its state. As noted in //! [`crate::reference_docs::blockchain_state_machines`], this state transition function is called //! the **Runtime**, and WASM is chosen as the bytecode. The Runtime is stored under a special key -//! in the state (see -//! [`sp_core::storage::well_known_keys`](../../../sp_core/index.html)) and can be -//! updated as a part of the state transition function's execution, just like a user's account -//! balance can be updated. +//! in the state (see [`sp_core::storage::well_known_keys`]) and can be updated as a part of the +//! state transition function's execution, just like a user's account balance can be updated. //! //! > Note that while we drew an analogy between smart contracts and runtimes in the above, there //! > are fundamental differences between the two, explained in //! > [`crate::reference_docs::runtime_vs_smart_contract`]. //! -//! The rest of the system that is NOT the state transition function is called the **node**, and -//! is a normal binary that is compiled from Rust to different hardware targets. +//! The rest of the system that is NOT the state transition function is called the +//! [**Node**](crate::reference_docs::glossary#node), and is a normal binary that is compiled from +//! Rust to different hardware targets. //! //! This design enables all Substrate-based chains to be fork-less-ly upgradeable, because the //! Runtime can be updates on the fly, within the execution of a block, and the node is (for the @@ -47,15 +48,18 @@ #![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_client_runtime.mmd")] //! //! A runtime must have a set of runtime APIs in order to have any meaningful blockchain -//! functionality, but it can also expose more APIs. See TODO as an example of how to add custom -//! runtime APIs to your FRAME-based runtime. +//! functionality, but it can also expose more APIs. See +//! [`crate::reference_docs::custom_runtime_api_rpc`] as an example of how to add custom runtime +//! APIs to your FRAME-based runtime. //! //! Similarly, for a runtime to be "compatible" with a node, the node must implement the full set of //! host functions that the runtime at any point in time requires. Given the fact that a runtime can //! evolve in time, and a blockchain node (typically) wishes to be capable of re-executing all the //! previous blocks, this means that a node must always maintain support for the old host functions. -//! This also implies that adding a new host function is a big commitment and should be done with -//! care. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! 
**This implies that adding a new host function is a big commitment and should be done with +//! care**. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! Learn how to add a new host function to your runtime in +//! [`crate::reference_docs::custom_host_functions`]. //! //! ## Node vs. Runtime //! @@ -90,11 +94,11 @@ //! //! In fact, [`sp_core::storage::well_known_keys`] are the only state keys that the node side is //! aware of. The rest of the state, including what logic the runtime has, what balance each user -//! has and such are all only comprehensible to the runtime. +//! has and such, are all only comprehensible to the runtime. #![doc = simple_mermaid::mermaid!("../../../mermaid/state.mmd")] //! //! In the above diagram, all of the state keys and values are opaque bytes to the node. The node -//! does not know what they mean, and it does not now what is the type of the corresponding value +//! does not know what they mean, and it does not know the type of the corresponding value //! (e.g. if it is a number of a vector). Contrary, the runtime knows both the meaning of their //! keys, and the type of the values. //! @@ -105,9 +109,50 @@ //! //! ## Native Runtime //! -//! TODO +//! Historically, the node software also kept a native copy of the runtime at the time of +//! compilation within it. This used to be called the "Native Runtime". The main purpose of the +//! native runtime used to be leveraging the faster execution time and better debugging +//! infrastructure of native code. However, neither of the two arguments strongly holds anymore, and +//! the native runtime is being fully removed from the node side of the SDK. //! +//! See: +//! +//! > Also, note that the flag [`sc_cli::ExecutionStrategy::Native`] is already a no-op and all +//! > chains built with Substrate only use WASM execution. +//! +//! ### Runtime Versions +//! +//! An important detail of native execution worth learning about is that the node software only +//! uses the native runtime if it is the same code as the wasm blob stored onchain. Otherwise, +//! nodes that run the native runtime could arrive at a different state transition. How do nodes +//! determine if two runtimes are the same? Through the very important +//! [`sp_version::RuntimeVersion`]. All runtimes expose their version via a runtime api +//! ([`sp_api::Core::version`]) that returns this struct. The node software, or other applications, +//! inspect this struct to examine the identity of a runtime, and to determine if two runtimes are +//! the same. Namely, [`sp_version::RuntimeVersion::spec_version`] is the main field used to decide +//! whether two runtimes are the same. +//! +//! Therefore, it is of utmost importance to make sure that the spec version is bumped before any +//! runtime upgrade. //! //! ## Example: Block Execution. //! -//! TODO +//! As a final example to recap, let's look at how Substrate-based nodes execute blocks. Blocks are +//! received by the node software as opaque blobs in the networking layer. +//! +//! At some point, based on the consensus algorithm's rules, the node decides to import (aka. +//! *validate*) a block. +//! +//! * First, the node fetches the state at the parent hash of the block that is to be imported. +//! * The runtime is fetched from this state, and placed into a WASM execution environment. +//! * The [`sp_api::Core::execute_block`] runtime API is called and the block is passed in as an +//! argument. +//! 
* The runtime will then execute the block, and update the state accordingly. Any state update is +//! issued via the [`sp_io::storage`] host functions. +//! * Both the runtime and the node will check that the state root after block execution matches +//! the one claimed in the block header. +//! +//! > Example taken from [this +//! > lecture](https://polkadot-blockchain-academy.github.io/pba-book/substrate/wasm/page.html#example-2-block-import-9) +//! > of the Polkadot Blockchain Academy. diff --git a/master.wasm b/master.wasm new file mode 100644 index 0000000000000000000000000000000000000000..7ebb14371243afa4956a107374b27a4e686f0360 Binary files /dev/null and b/master.wasm differ diff --git a/modified.wasm b/modified.wasm new file mode 100644 index 0000000000000000000000000000000000000000..7ebb14371243afa4956a107374b27a4e686f0360 Binary files /dev/null and b/modified.wasm differ diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 3aeec8d5961e35133233c35c38f57c2145c7f62c..3a939464868fed72d4bf89f3501dae84769d97b0 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -25,32 +25,32 @@ default-run = "polkadot" workspace = true [dependencies] -color-eyre = { version = "0.6.1", default-features = false } -tikv-jemallocator = { version = "0.5.0", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] } +color-eyre = { workspace = true } +tikv-jemallocator = { optional = true, features = ["unprefixed_malloc_on_supported_platforms"], workspace = true } # Crates in our workspace, defined as dependencies so we can pass them feature flags. -polkadot-cli = { path = "cli", features = ["rococo-native", "westend-native"] } -polkadot-node-core-pvf = { path = "node/core/pvf" } -polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } -polkadot-overseer = { path = "node/overseer" } +polkadot-cli = { features = ["rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-node-core-pvf = { workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } # Needed for worker binaries. 
-polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } -polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"] } [dev-dependencies] -assert_cmd = "2.0.4" -nix = { version = "0.28.0", features = ["signal"] } -tempfile = "3.2.0" -tokio = "1.37" -substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } -polkadot-core-primitives = { path = "core-primitives" } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [badges] maintenance = { status = "actively-developed" } @@ -68,6 +68,11 @@ jemalloc-allocator = [ "polkadot-overseer/jemalloc-allocator", ] +# Generate the metadata hash needed for CheckMetadataHash +# in the builtin test runtimes (westend and rococo). +metadata-hash = [ + "polkadot-cli/metadata-hash", +] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 1917dcd579c4c6a723acc7f5fcde086f6e8e5e53..da37f6062c5725e9162718c7d49ee50d94617140 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -18,37 +18,38 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -cfg-if = "1.0" -clap = { version = "4.5.3", features = ["derive"], optional = true } +cfg-if = { workspace = true } +clap = { features = ["derive"], optional = true, workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -futures = "0.3.30" -pyroscope = { version = "0.5.3", optional = true } -pyroscope_pprofrs = { version = "0.2", optional = true } +futures = { workspace = true } +pyroscope = { optional = true, workspace = true } +pyroscope_pprofrs = { optional = true, workspace = true } -polkadot-service = { path = "../node/service", default-features = false, optional = true } +polkadot-service = { optional = true, workspace = true } -sp-core = { path = "../../substrate/primitives/core" } -sp-io = { path = "../../substrate/primitives/io" } -sp-keyring = { path = "../../substrate/primitives/keyring" } -sp-maybe-compressed-blob = { path = "../../substrate/primitives/maybe-compressed-blob" } -frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli", optional = true } -sc-cli = { path = "../../substrate/client/cli", optional = true } -sc-service = { path = "../../substrate/client/service", optional = true } -polkadot-node-metrics = { path = "../node/metrics" } -polkadot-node-primitives = { path = "../node/primitives" } -sc-tracing = { path = "../../substrate/client/tracing", optional = true } -sc-sysinfo = { path = "../../substrate/client/sysinfo" } -sc-executor = { path = "../../substrate/client/executor" } -sc-storage-monitor = { path = "../../substrate/client/storage-monitor" } 
-sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +frame-benchmarking-cli = { optional = true, workspace = true, default-features = true } +sc-cli = { optional = true, workspace = true, default-features = true } +sc-service = { optional = true, workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-tracing = { optional = true, workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-storage-monitor = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = ["cli", "db", "full-node"] db = ["polkadot-service/db"] +metadata-hash = ["polkadot-service/metadata-hash"] service = ["dep:polkadot-service"] cli = [ "clap", diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index b89054b4dc32178b0c42b3f365392e5202b88bc5..62d99122c3012701aa46039b13c3c05d3331d8c3 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -192,7 +192,7 @@ where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { let runner = cli - .create_runner_with_logger_hook::(&cli.run.base, logger_hook) + .create_runner_with_logger_hook::<_, _, F>(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 7d94196fa26dbbe95bfe543243c62099f1b4cb03..800434670f83bada72c95423081d89cd1ddeb46d 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -10,11 +10,11 @@ license.workspace = true workspace = true [dependencies] -sp-core = { path = "../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 3c14fd95eee3b29988a87b68047f585c32c5bbc0..969742c5bb0aa792ea81b287f588b1315a48971d 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -polkadot-primitives = { path = "../primitives" } -polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } -novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" } -codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "std"] } -sp-core = { path = "../../substrate/primitives/core" } -sp-trie = { path = "../../substrate/primitives/trie" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +novelpoly = { workspace = true } +codec = { features = ["derive", "std"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] -quickcheck = { version = "1.0.3", default-features = false } -criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] } +quickcheck = { workspace = true } +criterion = { features = ["cargo_bench_support"], workspace = true } [[bench]] name = "scaling_with_validators" diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index bd254f6d51651d27ac2a992285c80f9e1e272623..6f451f0319b23dee9ebbf08726dd4550f518e95d 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -10,10 +10,10 @@ publish = false workspace = true [dependencies] -polkadot-erasure-coding = { path = ".." } -honggfuzz = "0.5" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../../node/primitives" } +polkadot-erasure-coding = { workspace = true, default-features = true } +honggfuzz = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } [[bin]] name = "reconstruct" diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index da5d10d7994970dde81b8530888c28c56dffb0b0..4b0a5f7248ab383884365105b164f27f2a090bf1 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -10,21 +10,21 @@ description = "Collator-side subsystem that handles incoming candidate submissio workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-primitives = { path = "../../primitives" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { 
path = "../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -assert_matches = "1.4.0" -rstest = "0.18.2" -sp-keyring = { path = "../../../substrate/primitives/keyring" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +sp-keyring = { workspace = true, default-features = true } diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 0c2f8ee14a58033dce47947a4e923ea644213831..d38516a4ff713f32c98c4e616155c4cdffb8bfb8 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -147,11 +147,7 @@ impl CollationGenerationSubsystem { Ok(FromOrchestra::Communication { msg: CollationGenerationMessage::Reinitialize(config), }) => { - if self.config.is_none() { - gum::error!(target: LOG_TARGET, "no initial initialization"); - } else { - self.config = Some(Arc::new(config)); - } + self.config = Some(Arc::new(config)); false }, Ok(FromOrchestra::Communication { diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 7da3d7ddd781485d673b29980b6787b99aea9da6..65985c0a5db930cf94922a6c8fde6e549021e7ef 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -10,51 +10,51 @@ description = "Approval Voting Subsystem of the Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -schnellru = "0.2.1" -merlin = "3.0" -schnorrkel = "0.11.4" -kvdb = "0.13.0" -derive_more = "0.99.17" +futures = { workspace = true } +futures-timer = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +gum = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +schnellru = { workspace = true } +merlin = { workspace = true, default-features = true } +schnorrkel = { workspace = true, default-features = true } +kvdb = { workspace = true } +derive_more = { workspace = true, default-features = true } thiserror = { workspace = true } -itertools = "0.11" +itertools = { workspace = true } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } -sc-keystore = { path = "../../../../substrate/client/keystore", default-features = false } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common", default-features = false } -sp-consensus-slots = { path = 
"../../../../substrate/primitives/consensus/slots", default-features = false } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sc-keystore = { workspace = true } +sp-consensus = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-application-crypto = { features = ["full_crypto"], workspace = true } +sp-runtime = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = { version = "0.3.1" } -rand = "0.8.5" +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1.79" -parking_lot = "0.12.1" -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -assert_matches = "1.4.0" -kvdb-memorydb = "0.13.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +async-trait = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } log = { workspace = true, default-features = true } -env_logger = "0.11" +env_logger = { workspace = true } -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "approval-voting-regression-bench" diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 62f7ff0b61e64de81fa6b3c34987df042a865cbb..4274c8b576a3dc82cefbc22a7619bfe3206c181d 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -10,32 +10,32 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -kvdb = "0.13.0" +futures = { workspace = true } +futures-timer = { workspace = true } +kvdb = { workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = "1.0.0" +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common", default-features = false } -polkadot-node-jaeger = { path = "../../jaeger" } +codec = { features = ["derive"], workspace = true, 
default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sp-consensus = { workspace = true } +polkadot-node-jaeger = { workspace = true, default-features = true } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -kvdb-memorydb = "0.13.0" +env_logger = { workspace = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -parking_lot = "0.12.1" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-core = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index ffd6de076889413e6872e7c765d849798afb338a..1b52afc309bc9e301f879a720d253467a5359f13 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -10,28 +10,28 @@ description = "The Candidate Backing Subsystem. 
Tracks parachain candidates that workspace = true [dependencies] -futures = "0.3.30" -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-statement-table = { path = "../../../statement-table" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-statement-table = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -schnellru = "0.2.1" +fatality = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -futures = { version = "0.3.30", features = ["thread-pool"] } -assert_matches = "1.4.0" -rstest = "0.18.2" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 38e8a93bb0482c074e1e968090db33d16e0a284d..5bcd47a2434c71f3a6dd88e4c0456f467817deb7 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -102,6 +102,7 @@ use polkadot_node_subsystem_util::{ runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, + vstaging::{fetch_claim_queue, ClaimQueueSnapshot}, Validator, }; use polkadot_primitives::{ @@ -212,8 +213,6 @@ struct PerRelayParentState { parent: Hash, /// Session index. session_index: SessionIndex, - /// The `ParaId` assigned to the local validator at this relay parent. - assigned_para: Option, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. 
@@ -233,8 +232,11 @@ struct PerRelayParentState { /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, /// which represent the assigned core index. True if ElasticScalingMVP is enabled. inject_core_index: bool, - /// The core states for all cores. - cores: Vec, + /// The number of cores. + n_cores: u32, + /// Claim queue state. If the runtime API is not available, it'll be populated with info from + /// availability cores. + claim_queue: ClaimQueueSnapshot, /// The validator index -> group mapping at this relay parent. validator_to_group: Arc>>, /// The associated group rotation information. @@ -825,8 +827,8 @@ async fn handle_communication( CandidateBackingMessage::Statement(relay_parent, statement) => { handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; }, - CandidateBackingMessage::GetBackedCandidates(requested_candidates, tx) => - handle_get_backed_candidates_message(state, requested_candidates, tx, metrics)?, + CandidateBackingMessage::GetBackableCandidates(requested_candidates, tx) => + handle_get_backable_candidates_message(state, requested_candidates, tx, metrics)?, CandidateBackingMessage::CanSecond(request, tx) => handle_can_second_request(ctx, state, request, tx).await, } @@ -1004,20 +1006,19 @@ macro_rules! try_runtime_api { fn core_index_from_statement( validator_to_group: &IndexedVec>, group_rotation_info: &GroupRotationInfo, - cores: &[CoreState], + n_cores: u32, + claim_queue: &ClaimQueueSnapshot, statement: &SignedFullStatementWithPVD, ) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - let n_cores = cores.len(); - gum::trace!( target:LOG_TARGET, ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len(), + n_cores, ?candidate_hash, "Extracting core index from statement" ); @@ -1029,7 +1030,7 @@ fn core_index_from_statement( ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len() , + n_cores, ?candidate_hash, "Invalid validator index: {:?}", statement_validator_index @@ -1038,37 +1039,25 @@ fn core_index_from_statement( }; // First check if the statement para id matches the core assignment. 
- let core_index = group_rotation_info.core_for_group(*group_index, n_cores); + let core_index = group_rotation_info.core_for_group(*group_index, n_cores as _); - if core_index.0 as usize > n_cores { + if core_index.0 > n_cores { gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); return None } if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { let candidate_para_id = candidate.descriptor.para_id; - let assigned_para_id = match &cores[core_index.0 as usize] { - CoreState::Free => { - gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); - return None - }, - CoreState::Occupied(occupied) => - if let Some(next) = &occupied.next_up_on_available { - next.para_id - } else { - return None - }, - CoreState::Scheduled(scheduled) => scheduled.para_id, - }; + let mut assigned_paras = claim_queue.iter_claims_for_core(&core_index); - if assigned_para_id != candidate_para_id { + if !assigned_paras.any(|id| id == &candidate_para_id) { gum::debug!( target: LOG_TARGET, ?candidate_hash, ?core_index, - ?assigned_para_id, + assigned_paras = ?claim_queue.iter_claims_for_core(&core_index).collect::>(), ?candidate_para_id, - "Invalid CoreIndex, core is assigned to a different para_id" + "Invalid CoreIndex, core is not assigned to this para_id" ); return None } @@ -1129,6 +1118,8 @@ async fn construct_per_relay_parent_state( Error::UtilError(TryFrom::try_from(e).expect("the conversion is infallible; qed")) })?; + let maybe_claim_queue = try_runtime_api!(fetch_claim_queue(ctx.sender(), parent).await); + let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct( &validators, @@ -1153,31 +1144,35 @@ async fn construct_per_relay_parent_state( let mut groups = HashMap::>::new(); let mut assigned_core = None; - let mut assigned_para = None; + + let has_claim_queue = maybe_claim_queue.is_some(); + let mut claim_queue = maybe_claim_queue.unwrap_or_default().0; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match core { - CoreState::Scheduled(scheduled) => scheduled.para_id, - CoreState::Occupied(occupied) => - if mode.is_enabled() { + let core_index = CoreIndex(idx as _); + + if !has_claim_queue { + match core { + CoreState::Scheduled(scheduled) => + claim_queue.insert(core_index, [scheduled.para_id].into_iter().collect()), + CoreState::Occupied(occupied) if mode.is_enabled() => { // Async backing makes it legal to build on top of // occupied core. 
if let Some(next) = &occupied.next_up_on_available { - next.para_id + claim_queue.insert(core_index, [next.para_id].into_iter().collect()) } else { continue } - } else { - continue }, - CoreState::Free => continue, - }; + _ => continue, + }; + } else if !claim_queue.contains_key(&core_index) { + continue + } - let core_index = CoreIndex(idx as _); let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assigned_para = Some(core_para_id); assigned_core = Some(core_index); } groups.insert(core_index, g.clone()); @@ -1212,7 +1207,6 @@ async fn construct_per_relay_parent_state( parent, session_index, assigned_core, - assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1221,7 +1215,8 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - cores, + n_cores: cores.len() as u32, + claim_queue: ClaimQueueSnapshot::from(claim_queue), validator_to_group: validator_to_group.clone(), group_rotation_info, })) @@ -1674,7 +1669,8 @@ async fn import_statement( let core = core_index_from_statement( &rp_state.validator_to_group, &rp_state.group_rotation_info, - &rp_state.cores, + rp_state.n_cores, + &rp_state.claim_queue, statement, ) .ok_or(Error::CoreIndexUnavailable)?; @@ -2098,12 +2094,14 @@ async fn handle_second_message( return Ok(()) } + let assigned_paras = rp_state.assigned_core.and_then(|core| rp_state.claim_queue.0.get(&core)); + // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != rp_state.assigned_para { + if !matches!(assigned_paras, Some(paras) if paras.contains(&candidate.descriptor().para_id)) { gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); @@ -2113,7 +2111,7 @@ async fn handle_second_message( gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Current assignments vs collation", ); @@ -2160,7 +2158,7 @@ async fn handle_statement_message( } } -fn handle_get_backed_candidates_message( +fn handle_get_backable_candidates_message( state: &State, requested_candidates: HashMap>, tx: oneshot::Sender>>, diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index bb23c7fbeb24f741ce065f2285caaa7fdb34c922..10eb45b82d12544be09a188493d7114fc99c6d58 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -42,7 +42,10 @@ use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; use sp_tracing as _; -use std::{collections::HashMap, time::Duration}; +use std::{ + collections::{BTreeMap, HashMap, VecDeque}, + time::Duration, +}; mod prospective_parachains; @@ -75,6 +78,7 @@ pub(crate) struct TestState { validator_groups: (Vec>, GroupRotationInfo), validator_to_group: IndexedVec>, availability_cores: Vec, + claim_queue: BTreeMap>, head_data: HashMap, signing_context: SigningContext, relay_parent: Hash, @@ -130,6 +134,10 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore 
{ para_id: chain_b, collator: None }), ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let mut head_data = HashMap::new(); head_data.insert(chain_a, HeadData(vec![4, 5, 6])); head_data.insert(chain_b, HeadData(vec![5, 6, 7])); @@ -153,6 +161,7 @@ impl Default for TestState { validator_groups: (validator_groups, group_rotation_info), validator_to_group, availability_cores, + claim_queue, head_data, validation_data, signing_context, @@ -338,6 +347,26 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } async fn assert_validation_requests( @@ -674,7 +703,7 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate_a_hash, test_state.relay_parent)], @@ -730,11 +759,16 @@ fn get_backed_candidate_preserves_order() { // Assign the second core to the same para as the first one. test_state.availability_cores[1] = CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + *test_state.claim_queue.get_mut(&CoreIndex(1)).unwrap() = + [test_state.chain_ids[0]].into_iter().collect(); // Add another availability core for paraid 2. test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[1], collator: None, })); + test_state + .claim_queue + .insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect()); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -861,7 +895,7 @@ fn get_backed_candidate_preserves_order() { // Happy case, all candidates should be present. 
let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ ( test_state.chain_ids[0], @@ -912,7 +946,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -951,7 +985,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -996,7 +1030,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -1103,7 +1137,8 @@ fn extract_core_index_from_statement_works() { let core_index_1 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_1, ) .unwrap(); @@ -1113,7 +1148,8 @@ fn extract_core_index_from_statement_works() { let core_index_2 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_2, ); @@ -1123,7 +1159,8 @@ fn extract_core_index_from_statement_works() { let core_index_3 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_3, ) .unwrap(); @@ -1284,7 +1321,7 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate_a.hash(), test_state.relay_parent)], @@ -1905,7 +1942,7 @@ fn backing_works_after_failed_validation() { // Try to get a set of backable candidates to trigger _some_ action in the subsystem // and check that it is still alive. 
let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate.hash(), test_state.relay_parent)], diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 74490c84eb18ba5521b4a189635c0326655973e5..15bc0b4a113902ab118bb24f5075eb947b9d874c 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -212,6 +212,26 @@ async fn activate_leaf( tx.send(Ok(Vec::new())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == hash => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } } diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 335e733987b017c0ee68f85d6dede42e3128b83c..126a18a141661c49411b576d9730e255ca627692 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -10,15 +10,15 @@ description = "Bitfield signing subsystem for the Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -wasm-timer = "0.2.5" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +wasm-timer = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index a0b25e6c25f9ccd3c5ab2e2ddd38b1f0aa0433a5..e1a98f80783fa472309d012eb097ad28dac72d46 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -10,29 +10,29 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", path = "../../../../substrate/primitives/maybe-compressed-blob" } 
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +codec = { features = ["bit-vec", "derive"], workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-overseer = { path = "../../overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies] -polkadot-node-core-pvf = { path = "../pvf" } +polkadot-node-core-pvf = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = { version = "0.3.30", features = ["thread-pool"] } -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-keyring = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index c58024876b9c77c371b9325c9da922a21211e698..a8e911e0c5c9586c31109462e1a9527c9d8246f2 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -10,20 +10,20 @@ description = "The Chain API subsystem provides access to chain related utility workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } [dev-dependencies] -futures = { version = "0.3.30", features = ["thread-pool"] } -maplit = "1.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12" } -polkadot-node-primitives = { path 
= "../../primitives" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } +futures = { features = ["thread-pool"], workspace = true } +maplit = { workspace = true } +codec = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 2aa929653ccc2a2fa07b16a7ad0a4ada025afb5a..755d5cadeaaf388a4e21eff79699ae06368e8010 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -10,20 +10,20 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -kvdb = "0.13.0" +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +kvdb = { workspace = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +codec = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -parking_lot = "0.12.1" -assert_matches = "1" -kvdb-memorydb = "0.13.0" +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 2c08cfa9b1efa623ae157c6fd60f10c9330d30d2..eb4600b235b9f2cfde8f030ebb3b9626d6ea8869 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -10,33 +10,33 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -kvdb = "0.13.0" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +kvdb = { workspace = true } thiserror = { workspace = true } -schnellru = "0.2.1" -fatality = "0.1.1" +schnellru = { workspace = true } +fatality = { workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = 
"../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } -sc-keystore = { path = "../../../../substrate/client/keystore" } +sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -kvdb-memorydb = "0.13.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -futures-timer = "3.0.2" -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +kvdb-memorydb = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +futures-timer = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] # If not enabled, the dispute coordinator will do nothing. 
diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 4f6090f90e9535a7cee8da2e333d5e66a0dea972..1e4953f40d0bd87bd5ef69bf3b91b66935e90fdc 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -10,13 +10,13 @@ description = "Parachains inherent data provider for Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -async-trait = "0.1.79" -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } +async-trait = { workspace = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f3193153be8940760ae00e5dac59d837ce15660a..97da5a1e94a07947765ddc8311b23c8d03cb4f79 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -10,25 +10,26 @@ description = "The Prospective Parachains subsystem. 
Tracks and handles prospect workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -bitvec = "1" +fatality = { workspace = true } +bitvec = { workspace = true, default-features = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +rstest = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index d5bb5ff76ba8e8e603f2ab6b915b6c8fdb2d5f3c..e4b6deffdf4a5280e23c8c4a81bdf2ae56bbb422 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -44,6 +44,7 @@ use polkadot_node_subsystem_util::{ inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, @@ -870,37 +871,51 @@ async fn fetch_backing_state( async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, -) -> JfyiErrorResult<Vec<ParaId>> { - let (tx, rx) = oneshot::channel(); - - // This'll have to get more sophisticated with parathreads, - // but for now we can just use the `AvailabilityCores`.
- ctx.send_message(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) - .await; - - let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; - let mut upcoming = HashSet::new(); - for core in cores { - match core { - CoreState::Occupied(occupied) => { - if let Some(next_up_on_available) = occupied.next_up_on_available { - upcoming.insert(next_up_on_available.para_id); - } - if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { - upcoming.insert(next_up_on_time_out.para_id); +) -> JfyiErrorResult<HashSet<ParaId>> { + Ok(match fetch_claim_queue(ctx.sender(), relay_parent).await? { + Some(claim_queue) => { + // Runtime supports claim queue - use it + claim_queue + .iter_all_claims() + .flat_map(|(_, paras)| paras.into_iter()) + .copied() + .collect() + }, + None => { + // fallback to availability cores - remove this branch once claim queue is released + // everywhere + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) + .await; + + let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; + + let mut upcoming = HashSet::with_capacity(cores.len()); + for core in cores { + match core { + CoreState::Occupied(occupied) => { + // core sharing won't work optimally with this branch because the collations + // can't be prepared in advance. + if let Some(next_up_on_available) = occupied.next_up_on_available { + upcoming.insert(next_up_on_available.para_id); + } + if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { + upcoming.insert(next_up_on_time_out.para_id); + } + }, + CoreState::Scheduled(scheduled) => { + upcoming.insert(scheduled.para_id); + }, + CoreState::Free => {}, } - }, - CoreState::Scheduled(scheduled) => { - upcoming.insert(scheduled.para_id); - }, - CoreState::Free => {}, - } - } + } - Ok(upcoming.into_iter().collect()) + upcoming + }, + }) } // Fetch ancestors in descending order, up to the amount requested.
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index d2fc3cbd36235a09bf8ce8c38b231f3e0e27cf97..221fbf4c4e60365855b24bcfff7d89cfbcc6d205 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -26,11 +26,15 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, - CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, + CommittedCandidateReceipt, CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use polkadot_primitives_test_helpers::make_candidate; -use std::sync::Arc; +use rstest::rstest; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; use test_helpers::mock::new_leaf; const ALLOWED_ANCESTRY_LEN: u32 = 3; @@ -70,7 +74,8 @@ fn dummy_constraints( } struct TestState { - availability_cores: Vec<CoreState>, + claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>, + runtime_api_version: u32, validation_code_hash: ValidationCodeHash, } @@ -79,13 +84,23 @@ impl Default for TestState { let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); - let availability_cores = vec![ - CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), - CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let validation_code_hash = Hash::repeat_byte(42).into(); - Self { availability_cores, validation_code_hash } + Self { + validation_code_hash, + claim_queue, + runtime_api_version: RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + } + } +} + +impl TestState { + fn set_runtime_api_version(&mut self, version: u32) { + self.runtime_api_version = version; } } @@ -227,12 +242,39 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) ) if parent == *hash => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send( + Ok(test_state.runtime_api_version) + ).unwrap(); } ); + if test_state.runtime_api_version < RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.values().map(|paras| CoreState::Scheduled( + ScheduledCore { + para_id: *paras.front().unwrap(), + collator: None + } + )).collect())).unwrap(); + } + ); + } else { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); + } + ); + } + send_block_header(virtual_overseer, *hash, *number).await; // Check that subsystem job issues a request for ancestors.
@@ -277,14 +319,16 @@ async fn handle_leaf_activation( ); } - for _ in 0..test_state.availability_cores.len() { + let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect(); + + for _ in 0..paras.len() { let message = virtual_overseer.recv().await; // Get the para we are working with since the order is not deterministic. - let para_id = match message { + let para_id = match &message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ParaBackingState(p_id, _), - )) => p_id, + )) => *p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -505,9 +549,18 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { // - Two for the same leaf A (one for parachain 1 and one for parachain 2) // - One for leaf B on parachain 1 // - One for leaf C on parachain 2 +// Also tests a claim queue size larger than 1. #[test] fn introduce_candidates_basic() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); + + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a, chain_b].into_iter().collect()); + + test_state.claim_queue = claim_queue; + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2032,9 +2085,15 @@ fn check_pvd_query() { // Test simultaneously activating and deactivating leaves, and simultaneously deactivating // multiple leaves. -#[test] -fn correctly_updates_leaves() { - let test_state = TestState::default(); +// This test is parametrised with the runtime api version. For versions that don't support the claim +// queue API, we check that av-cores are used. +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +#[case(8)] +fn correctly_updates_leaves(#[case] runtime_api_version: u32) { + let mut test_state = TestState::default(); + test_state.set_runtime_api_version(runtime_api_version); + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2140,15 +2199,12 @@ fn correctly_updates_leaves() { fn persists_pending_availability_candidate() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == &para_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2, 3]); @@ -2237,18 +2293,15 @@ fn persists_pending_availability_candidate() { } #[test] -fn backwards_compatible() { +fn backwards_compatible_with_non_async_backing_params() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == &para_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2,
3]); @@ -2350,20 +2403,30 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len - })).unwrap(); } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::AsyncBackingParams(tx) + )) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len})).unwrap(); + }); assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) ) if parent == hash => { - tx.send(Ok(Vec::new())).unwrap(); + tx.send(Ok(BTreeMap::new())).unwrap(); } ); diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index a81d22c6f82838c2d446a7d8cd0997013f161fa3..5869e494c70ff40365397d3ffa104f5bad6c421e 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -10,21 +10,21 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } +bitvec = { features = ["alloc"], workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -futures-timer = "3.0.2" -fatality = "0.1.1" -schnellru = "0.2.1" +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +futures-timer = { workspace = true } +fatality = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rstest = "0.18.2" +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index fa16b38d28bda4e364c5b6d8b74bb85ca727036d..3f622a60a059bbdb3062a90bd93d2661ad96d326 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ 
b/polkadot/node/core/provisioner/src/lib.rs @@ -822,7 +822,7 @@ async fn select_candidates( // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); - sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( + sender.send_unbounded_message(CandidateBackingMessage::GetBackableCandidates( selected_candidates.clone(), tx, )); diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 0d3675777cbf4ad0ef6620ae05b1b0bdab11cdb9..b38459302c8f18721b4ceb0515fdeac3d116cea8 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -578,7 +578,7 @@ mod select_candidates { )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => tx.send(Ok(mock_availability_cores.clone())).unwrap(), - AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( + AllMessages::CandidateBacking(CandidateBackingMessage::GetBackableCandidates( hashes, sender, )) => { diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 6dec407e2d2d1f91bcdf72b73b1db376dcdd07d5..73ef17a2843aedcffc40c6456d5519e62265b605 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -10,24 +10,24 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" +futures = { workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../../gum" } +gum = { workspace = true, default-features = true } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -futures-timer = "3.0.2" +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +futures-timer = { workspace = true } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml 
index 8aebe0b4c3f0c3875781ed537eadd4887ccf856b..7444f7927f568359fe2a33392468b1f38d1e400f 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -10,60 +10,60 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" -array-bytes = "6.2.2" -blake3 = "1.5" -cfg-if = "1.0" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -is_executable = { version = "1.0.1", optional = true } -pin-project = "1.0.9" -rand = "0.8.5" -slotmap = "1.0" -tempfile = "3.3.0" +always-assert = { workspace = true } +array-bytes = { workspace = true, default-features = true } +blake3 = { workspace = true } +cfg-if = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +is_executable = { optional = true, workspace = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } +slotmap = { workspace = true } +tempfile = { workspace = true } thiserror = { workspace = true } -tokio = { version = "1.24.2", features = ["fs", "process"] } +tokio = { features = ["fs", "process"], workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-node-core-pvf-common = { path = "common" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-primitives = { path = "../../../primitives" } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob", optional = true } -polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } -polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } +sp-core = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.4.0" -criterion = { version = "0.5.1", default-features = false, features = [ +assert_matches = { workspace = true } +criterion = { features = [ "async_tokio", "cargo_bench_support", -] } -hex-literal = "0.4.1" +], workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } +polkadot-node-core-pvf-common = { features = ["test-utils"], workspace = true, default-features = true } # 
For benches and integration tests, depend on ourselves with the test-utils # feature. -polkadot-node-core-pvf = { path = "", features = ["test-utils"] } -rococo-runtime = { path = "../../../runtime/rococo" } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } +rococo-runtime = { workspace = true } -test-parachain-adder = { path = "../../../parachain/test-parachains/adder" } -test-parachain-halt = { path = "../../../parachain/test-parachains/halt" } +test-parachain-adder = { workspace = true } +test-parachain-halt = { workspace = true } [target.'cfg(target_os = "linux")'.dev-dependencies] libc = "0.2.153" procfs = "0.16.0" rusty-fork = "0.3.0" -sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } +sc-sysinfo = { workspace = true, default-features = true } [[bench]] name = "host_prepare_rococo_runtime" diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 491f6cc49642cb3202f87bf189e7467cfb607230..18b3f959c95513763addc9ddf4ff82783e5c1a02 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -10,29 +10,29 @@ license.workspace = true workspace = true [dependencies] -cpu-time = "1.0.0" -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../../gum" } -libc = "0.2.152" -nix = { version = "0.28.0", features = ["resource", "sched"] } +cpu-time = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +libc = { workspace = true } +nix = { features = ["resource", "sched"], workspace = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } -polkadot-parachain-primitives = { path = "../../../../parachain" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sc-executor = { path = "../../../../../substrate/client/executor" } -sc-executor-common = { path = "../../../../../substrate/client/executor/common" } -sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } -sp-externalities = { path = "../../../../../substrate/primitives/externalities" } -sp-io = { path = "../../../../../substrate/primitives/io" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" @@ -41,8 +41,8 @@ landlock = "0.3.0" seccompiler = "0.4.0" [dev-dependencies] -assert_matches = "1.4.0" -tempfile = "3.3.0" +assert_matches = { workspace = true } +tempfile = { workspace = true } [features] # This feature is used to export test code to other crates without putting it 
in the production build. diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index 87491e70c5f2a0a06b22c8e2c902561d0745dbfd..47f9ed1604e781c045ea643d251da8e324c98287 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -215,19 +215,19 @@ type HostFunctions = ( struct ValidationExternalities(sp_externalities::Extensions); impl sp_externalities::Externalities for ValidationExternalities { - fn storage(&self, _: &[u8]) -> Option<Vec<u8>> { + fn storage(&mut self, _: &[u8]) -> Option<Vec<u8>> { panic!("storage: unsupported feature for parachain validation") } - fn storage_hash(&self, _: &[u8]) -> Option<Vec<u8>> { + fn storage_hash(&mut self, _: &[u8]) -> Option<Vec<u8>> { panic!("storage_hash: unsupported feature for parachain validation") } - fn child_storage_hash(&self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { + fn child_storage_hash(&mut self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { panic!("child_storage_hash: unsupported feature for parachain validation") } - fn child_storage(&self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { + fn child_storage(&mut self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { panic!("child_storage: unsupported feature for parachain validation") } @@ -275,11 +275,11 @@ impl sp_externalities::Externalities for ValidationExternalities { panic!("child_storage_root: unsupported feature for parachain validation") } - fn next_child_storage_key(&self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { + fn next_child_storage_key(&mut self, _: &ChildInfo, _: &[u8]) -> Option<Vec<u8>> { panic!("next_child_storage_key: unsupported feature for parachain validation") } - fn next_storage_key(&self, _: &[u8]) -> Option<Vec<u8>> { + fn next_storage_key(&mut self, _: &[u8]) -> Option<Vec<u8>> { panic!("next_storage_key: unsupported feature for parachain validation") } diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index cf5b873e29d771e3806c8ec3085a7385b0d59b0b..f24b66dc4a0e8ba20f6b7a8205c7092a47c30245 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -cpu-time = "1.0.0" -gum = { package = "tracing-gum", path = "../../../gum" } -cfg-if = "1.0" -nix = { version = "0.28.0", features = ["process", "resource", "sched"] } -libc = "0.2.152" +cpu-time = { workspace = true } +gum = { workspace = true, default-features = true } +cfg-if = { workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } +libc = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } -polkadot-node-core-pvf-common = { path = "../common" } -polkadot-parachain-primitives = { path = "../../../../parachain" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [features] builder = [] diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index f7daa0d7a89c3e65b3e6dee9ad3bcbe1e9ecf60e..9e0d01fc438b0257b87b9bf6791f466d8691049e 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++
b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -blake3 = "1.5" -cfg-if = "1.0" -gum = { package = "tracing-gum", path = "../../../gum" } -libc = "0.2.152" -rayon = "1.5.1" -tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } -tikv-jemallocator = { version = "0.5.0", optional = true } -nix = { version = "0.28.0", features = ["process", "resource", "sched"] } +blake3 = { workspace = true } +cfg-if = { workspace = true } +gum = { workspace = true, default-features = true } +libc = { workspace = true } +rayon = { workspace = true } +tracking-allocator = { workspace = true, default-features = true } +tikv-jemalloc-ctl = { optional = true, workspace = true } +tikv-jemallocator = { optional = true, workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } -polkadot-node-core-pvf-common = { path = "../common" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sc-executor-common = { path = "../../../../../substrate/client/executor/common" } -sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = "0.5.0" @@ -41,9 +41,9 @@ jemalloc-allocator = [ ] [dev-dependencies] -criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] } -rococo-runtime = { path = "../../../../runtime/rococo" } -sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } +criterion = { features = ["cargo_bench_support"], workspace = true } +rococo-runtime = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } [[bench]] name = "prepare_rococo_runtime" diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 5524cc705457e45e18e6ea9919c567bf796225c7..834e4b300b9eba67c4764ac60e00261b66215250 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -schnellru = "0.2.1" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +schnellru = { workspace = true } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } +sp-consensus-babe = { workspace = true, default-features = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { 
workspace = true, default-features = true } [dev-dependencies] -sp-api = { path = "../../../../substrate/primitives/api" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 0d887b9be5394c6b36f882d60f57d4ee2d9bf8eb..9b2df435a06a90e4cb0561e19ce4a85dd281b127 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -10,7 +10,7 @@ description = "Stick logs together with the TraceID as provided by tempo" workspace = true [dependencies] -coarsetime = "0.1.22" -tracing = "0.1.35" -gum-proc-macro = { package = "tracing-gum-proc-macro", path = "proc-macro" } -polkadot-primitives = { path = "../../primitives", features = ["std"] } +coarsetime = { workspace = true } +tracing = { workspace = true, default-features = true } +gum-proc-macro = { workspace = true, default-features = true } +polkadot-primitives = { features = ["std"], workspace = true, default-features = true } diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 70126b4f43367ce11a1a23d462392b513ca1c028..da6364977cae25f16e8605f8024dc96aa86f6120 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -18,12 +18,12 @@ proc-macro = true [dependencies] syn = { features = ["extra-traits", "full"], workspace = true } quote = { workspace = true } -proc-macro2 = "1.0.56" -proc-macro-crate = "3.0.0" -expander = "2.0.0" +proc-macro2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } [dev-dependencies] -assert_matches = "1.5.0" +assert_matches = { workspace = true } [features] diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 18b0c417aaf3d5798d81b8a2d4b11e4ddcc5f5fc..90a6c80e3d0bd3ab2567933bf94d89b7a00dc1e9 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -10,15 +10,15 @@ description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" workspace = true [dependencies] -mick-jaeger = "0.1.8" -lazy_static = "1.4" -parking_lot = "0.12.1" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sp-core = { path = "../../../substrate/primitives/core" } +mick-jaeger = { workspace = true } +lazy_static = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = 
true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = "1.37" +tokio = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index fec148f7d38150936d733dfadf2030f1eea2c76c..49434606a61c8dff08a7c075c06c998cc8395a5b 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -29,40 +29,40 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { path = "../../cli", features = ["malus", "rococo-native", "westend-native"] } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator" } -polkadot-node-core-candidate-validation = { path = "../core/candidate-validation" } -polkadot-node-core-backing = { path = "../core/backing" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-primitives = { path = "../../primitives" } -color-eyre = { version = "0.6.1", default-features = false } -assert_matches = "1.5" -async-trait = "0.1.79" -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -rand = "0.8.5" +polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { workspace = true, default-features = true } +polkadot-node-core-backing = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +color-eyre = { workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } # Required for worker binaries to build. 
-polkadot-node-core-pvf-common = { path = "../core/pvf/common" } -polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } -polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.30", features = ["thread-pool"] } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 55df8d3daf6d17d142aa7980884c5ad9b4f0e568..41b08b66e9b4881bf93285b02076714bf41dfb0f 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -10,32 +10,34 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +metered = { features = ["futures_channel"], workspace = true } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. 
-sc-service = { path = "../../../substrate/client/service" } -sc-cli = { path = "../../../substrate/client/cli" } +sc-service = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -sc-tracing = { path = "../../../substrate/client/tracing" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -polkadot-primitives = { path = "../../primitives" } -bs58 = { version = "0.5.0", features = ["alloc"] } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +bs58 = { features = ["alloc"], workspace = true, default-features = true } log = { workspace = true, default-features = true } [dev-dependencies] -assert_cmd = "2.0.4" -tempfile = "3.2.0" -hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } -tokio = "1.37" -polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } -substrate-test-utils = { path = "../../../substrate/test-utils" } -sc-service = { path = "../../../substrate/client/service" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -prometheus-parse = { version = "0.2.2" } +assert_cmd = { workspace = true } +tempfile = { workspace = true } +hyper-util = { features = ["client-legacy", "tokio"], workspace = true } +hyper = { workspace = true } +http-body-util = { workspace = true } +tokio = { workspace = true, default-features = true } +polkadot-test-service = { features = ["runtime-metrics"], workspace = true } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +prometheus-parse = { workspace = true } [features] default = [] diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index fde7c314413462799f82aa727b0a5a1a68bbd5f6..e720924feb60c6d4ef3828d52312f96521a6f989 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -16,7 +16,9 @@ //! Polkadot runtime metrics integration test. -use hyper::{Client, Uri}; +use http_body_util::BodyExt; +use hyper::Uri; +use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; use sp_keyring::AccountKeyring::*; @@ -66,14 +68,20 @@ async fn runtime_can_publish_metrics() { } async fn scrape_prometheus_metrics(metrics_uri: &str) -> HashMap { - let res = Client::new() + let res = Client::builder(TokioExecutor::new()) + .build_http::>() .get(Uri::try_from(metrics_uri).expect("bad URI")) .await .expect("GET request failed"); // Retrieve the `HTTP` response body. 
let body = String::from_utf8( - hyper::body::to_bytes(res).await.expect("can't get body as bytes").to_vec(), + res.into_body() + .collect() + .await + .expect("can't get body as bytes") + .to_bytes() + .to_vec(), ) .expect("body is not an UTF8 string"); diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index d80519b9e2e95aa4d958bff5b9a08e3bddb2cf3c..a85cde303b61bfb9281c022b68ea220af18251e6 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -10,32 +10,32 @@ license.workspace = true workspace = true [dependencies] -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } -rand = "0.8" -itertools = "0.11" +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +itertools = { workspace = true } -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } [dev-dependencies] -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } +sp-authority-discovery = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } -assert_matches = "1.4.0" -schnorrkel = { version = "0.11.4", default-features = false } +assert_matches = { workspace = true } +schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = "0.3.1" -env_logger = "0.11" +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +env_logger = { workspace = true } log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index db3a0456d9adbe3bc94e5d69b0c3586a78206c8b..8c5574f244e4a0671e807cc46dcb1286bafaf3a2 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -10,35 +10,35 @@ license.workspace = true workspace = true 
[dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } -polkadot-primitives = { path = "../../../primitives" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-primitives = { path = "../../primitives" } -sc-network = { path = "../../../../substrate/client/network" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = "0.8.5" -derive_more = "0.99.17" -schnellru = "0.2.1" -fatality = "0.1.1" +rand = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +schnellru = { workspace = true } +fatality = { workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-network = { path = "../../../../substrate/client/network" } -futures-timer = "3.0.2" -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rstest = "0.18.2" -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 1c9c861e6f733ebf7e23ae323d6685c9ce26a66b..41f09b1f7044358697fe923ab56f745bfdce22e7 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -10,39 +10,39 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -tokio = "1.37" -schnellru = "0.2.1" -rand = "0.8.5" -fatality = "0.1.1" +futures = { workspace = true } +tokio = { 
workspace = true, default-features = true } +schnellru = { workspace = true } +rand = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } -async-trait = "0.1.79" -gum = { package = "tracing-gum", path = "../../gum" } - -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-network-protocol = { path = "../protocol" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } +async-trait = { workspace = true } +gum = { workspace = true, default-features = true } + +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.4.0" -futures-timer = "3.0.2" -rstest = "0.18.2" +assert_matches = { workspace = true } +futures-timer = { workspace = true } +rstest = { workspace = true } log = { workspace = true, default-features = true } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sc-network = { path = "../../../../substrate/client/network" } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "availability-recovery-regression-bench" diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6b5b784b7fd899713c4ccbce2f9dec649a50f933..b1becaf319d55068a811a6cd97dbbccc9702dcbb 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -10,26 +10,26 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } 
-polkadot-node-network-protocol = { path = "../protocol" } -rand = "0.8" +always-assert = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -maplit = "1.0.2" +polkadot-node-subsystem-test-helpers = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +maplit = { workspace = true } log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -rand_chacha = "0.3.1" +env_logger = { workspace = true } +assert_matches = { workspace = true } +rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index cd4e00ee1e4c596ed8c9798fd0009c95f6950a93..b4b5743853cd6dc2b43cce5d6b8c4d63bb893c8b 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -10,28 +10,28 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" -async-trait = "0.1.79" -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-overseer = { path = "../../overseer" } -parking_lot = "0.12.1" -bytes = "1" -fatality = "0.1.1" +always-assert = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = 
true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures-timer = "3" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +futures-timer = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs index b11af8a8a089c4aa64b26312562636b039068a78..f0ef038d5eb40708165f41dcdc5310b5dd44c951 100644 --- a/polkadot/node/network/bridge/src/validator_discovery.rs +++ b/polkadot/node/network/bridge/src/validator_discovery.rs @@ -88,16 +88,6 @@ impl Service { { gum::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } - // the addresses are known to be valid - // - // for peer-set management, the main protocol name should be used regardless of - // the negotiated version. - let _ = network_service - .remove_from_peers_set( - self.peerset_protocol_names.get_main_name(peer_set), - peers_to_remove, - ) - .await; network_service } diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index a56c1c7dfe9865dd82b78dda40a5e3aec228daf3..d41fc7ebe8ddb292aadd281c0b34288d8eb49e10 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -10,38 +10,38 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } -futures = "0.3.30" -futures-timer = "3" -gum = { package = "tracing-gum", path = "../../gum" } - -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } - -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-subsystem = { path = "../../subsystem" } -fatality = "0.1.1" +bitvec = { features = ["alloc"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } + +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } + +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, 
default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } -tokio-util = "0.7.1" +tokio-util = { workspace = true } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -rstest = "0.18.2" - -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sc-network = { path = "../../../../substrate/client/network" } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } - -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +env_logger = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } + +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } + +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } [features] default = [] diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 80a85420b392b930f451689f22edffb4667a0588..5c201542eb560641c39c19cece9cc23fe076c90a 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ get_availability_cores, get_group_rotation_info, prospective_parachains_mode, ProspectiveParachainsMode, RuntimeInfo, }, + vstaging::fetch_claim_queue, TimeoutExt, }; use polkadot_primitives::{ @@ -579,22 +580,27 @@ async fn determine_cores( let cores = get_availability_cores(sender, relay_parent).await?; let n_cores = cores.len(); let mut assigned_cores = Vec::new(); + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent).await?; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match core { - CoreState::Scheduled(scheduled) => Some(scheduled.para_id), - CoreState::Occupied(occupied) => - if relay_parent_mode.is_enabled() { - // With async backing we don't care about the core state, - // it is only needed for figuring our validators group. - Some(occupied.candidate_descriptor.para_id) - } else { - None - }, - CoreState::Free => None, + let core_is_scheduled = match maybe_claim_queue { + Some(ref claim_queue) => { + // Runtime supports claim queue - use it. + claim_queue + .iter_claims_for_core(&CoreIndex(idx as u32)) + .any(|para| para == &para_id) + }, + None => match core { + CoreState::Scheduled(scheduled) if scheduled.para_id == para_id => true, + CoreState::Occupied(occupied) if relay_parent_mode.is_enabled() => + // With async backing we don't care about the core state, + // it is only needed for figuring our validators group.
+ occupied.next_up_on_available.as_ref().map(|c| c.para_id) == Some(para_id), + _ => false, + }, }; - if core_para_id == Some(para_id) { + if core_is_scheduled { assigned_cores.push(CoreIndex::from(idx as u32)); } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index a13e99df4ab477f14f09c147f31c8ea8eeeda7e4..13601ca7a0056508f06fdd3f2ed717ee4d2ea4b4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -16,7 +16,11 @@ use super::*; -use std::{collections::HashSet, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, HashSet, VecDeque}, + sync::Arc, + time::Duration, +}; use assert_matches::assert_matches; use futures::{executor, future, Future}; @@ -66,7 +70,7 @@ struct TestState { group_rotation_info: GroupRotationInfo, validator_peer_id: Vec, relay_parent: Hash, - availability_cores: Vec, + claim_queue: BTreeMap>, local_peer_id: PeerId, collator_pair: CollatorPair, session_index: SessionIndex, @@ -105,8 +109,9 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let availability_cores = - vec![CoreState::Scheduled(ScheduledCore { para_id, collator: None }), CoreState::Free]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [para_id].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); let relay_parent = Hash::random(); @@ -133,7 +138,7 @@ impl Default for TestState { group_rotation_info, validator_peer_id, relay_parent, - availability_cores, + claim_queue, local_peer_id, collator_pair, session_index: 1, @@ -147,17 +152,14 @@ impl TestState { pub fn with_elastic_scaling() -> Self { let mut state = Self::default(); let para_id = state.para_id; - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); + + state.claim_queue.insert(CoreIndex(2), [para_id].into_iter().collect()); + state.claim_queue.insert(CoreIndex(3), [para_id].into_iter().collect()); state } fn current_group_validator_indices(&self) -> &[ValidatorIndex] { - let core_num = self.availability_cores.len(); + let core_num = self.claim_queue.len(); let GroupIndex(group_idx) = self.group_rotation_info.group_for_core(CoreIndex(0), core_num); &self.session_info.validator_groups.get(GroupIndex::from(group_idx)).unwrap() } @@ -395,7 +397,36 @@ async fn distribute_collation_with_receipt( RuntimeApiRequest::AvailabilityCores(tx) )) => { assert_eq!(relay_parent, _relay_parent); - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send(Ok(test_state.claim_queue.values().map(|paras| + if let Some(para) = paras.front() { + CoreState::Scheduled(ScheduledCore { para_id: *para, collator: None }) + } else { + CoreState::Free + } + ).collect())).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + // obtain the claim queue schedule. 
+ assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::ClaimQueue(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); } ); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 0a0a85fb1f2750a54c909b523ab54564652d3354..ea8fdb0e04fbe0ba29303b62ae1d45c63d43cdf4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,7 +19,7 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; -use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header}; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; @@ -665,90 +665,3 @@ fn advertise_and_send_collation_by_hash() { }, ) } - -/// Tests that collator distributes collation built on top of occupied core. -#[test] -fn advertise_core_occupied() { - let mut test_state = TestState::default(); - let candidate = - TestCandidateBuilder { para_id: test_state.para_id, ..Default::default() }.build(); - test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { - next_up_on_available: None, - occupied_since: 0, - time_out_at: 0, - next_up_on_time_out: None, - availability: BitVec::default(), - group_responsible: GroupIndex(0), - candidate_hash: candidate.hash(), - candidate_descriptor: candidate.descriptor, - }); - - let local_peer_id = test_state.local_peer_id; - let collator_pair = test_state.collator_pair.clone(); - - test_harness( - local_peer_id, - collator_pair, - ReputationAggregator::new(|_| true), - |mut test_harness| async move { - let virtual_overseer = &mut test_harness.virtual_overseer; - - let head_a = Hash::from_low_u64_be(128); - let head_a_num: u32 = 64; - - // Grandparent of head `a`. - let head_b = Hash::from_low_u64_be(130); - - // Set collating para id. - overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) - .await; - // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; - - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let candidate = TestCandidateBuilder { - para_id: test_state.para_id, - relay_parent: head_b, - pov_hash: pov.hash(), - ..Default::default() - } - .build(); - let candidate_hash = candidate.hash(); - distribute_collation_with_receipt( - virtual_overseer, - &test_state, - head_b, - true, - candidate, - pov, - Hash::zero(), - ) - .await; - - let validators = test_state.current_group_validator_authority_ids(); - let peer_ids = test_state.current_group_validator_peer_ids(); - - connect_peer( - virtual_overseer, - peer_ids[0], - CollationVersion::V2, - Some(validators[0].clone()), - ) - .await; - expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; - // Peer is aware of the leaf. - send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; - - // Collation is advertised. 
- expect_advertise_collation_msg( - virtual_overseer, - &peer_ids[0], - head_b, - Some(vec![candidate_hash]), - ) - .await; - - test_harness - }, - ) -} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 001df1fb3da9b24a3c1acffc049cc7433903aea8..96ffe9f13db3562429eb68cdfdc05da801c33863 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -270,7 +270,7 @@ impl Collations { // We don't need to fetch any other collation when we already have seconded one. CollationStatus::Seconded => None, CollationStatus::Waiting => - if !self.is_seconded_limit_reached(relay_parent_mode) { + if self.is_seconded_limit_reached(relay_parent_mode) { None } else { self.waiting_queue.pop_front() @@ -280,7 +280,7 @@ impl Collations { } } - /// Checks the limit of seconded candidates for a given para. + /// Checks the limit of seconded candidates. pub(super) fn is_seconded_limit_reached( &self, relay_parent_mode: ProspectiveParachainsMode, @@ -293,7 +293,7 @@ impl Collations { } else { 1 }; - self.seconded_count < seconded_limit + self.seconded_count >= seconded_limit } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 9f037a983e51c33cb734ee65fc496541d1082bf2..f5c9726f3f6a509a4966720f5e2011d173a9d318 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -19,7 +19,7 @@ use futures::{ }; use futures_timer::Delay; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, future::Future, time::{Duration, Instant}, }; @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ CandidateHash, CollatorId, CoreState, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, @@ -362,8 +363,8 @@ impl PeerData { #[derive(Debug)] struct GroupAssignments { - /// Current assignment. - current: Option, + /// Current assignments. 
+ current: Vec, } struct PerRelayParent { @@ -376,7 +377,7 @@ impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - assignment: GroupAssignments { current: None }, + assignment: GroupAssignments { current: vec![] }, collations: Collations::default(), } } @@ -491,34 +492,34 @@ where .await .map_err(Error::CancelledAvailabilityCores)??; - let para_now = match polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore) - .and_then(|(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index)) - { - Some(group) => { - let core_now = rotation_info.core_for_group(group, cores.len()); - - cores.get(core_now.0 as usize).and_then(|c| match c { - CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()), - CoreState::Scheduled(core) => Some(core.para_id), - CoreState::Occupied(_) | CoreState::Free => None, - }) - }, - None => { - gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); - - return Ok(()) - }, + let core_now = if let Some(group) = + polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then( + |(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index), + ) { + rotation_info.core_for_group(group, cores.len()) + } else { + gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); + return Ok(()) }; - // This code won't work well, if at all for on-demand parachains. For on-demand we'll - // have to be aware of which core the on-demand claim is going to be multiplexed - // onto. The on-demand claim will also have a known collator, and we should always - // allow an incoming connection from that collator. If not even connecting to them - // directly. - // - // However, this'll work fine for parachains, as each parachain gets a dedicated - // core. - if let Some(para_id) = para_now.as_ref() { + let paras_now = match fetch_claim_queue(sender, relay_parent).await.map_err(Error::Runtime)? { + // Runtime supports claim queue - use it + // + // `relay_parent_mode` is not examined here because if the runtime supports claim queue + // then it supports async backing params too (`ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT` + // < `CLAIM_QUEUE_RUNTIME_REQUIREMENT`). + Some(mut claim_queue) => claim_queue.0.remove(&core_now), + // Claim queue is not supported by the runtime - use availability cores instead. + None => cores.get(core_now.0 as usize).and_then(|c| match c { + CoreState::Occupied(core) if relay_parent_mode.is_enabled() => + core.next_up_on_available.as_ref().map(|c| [c.para_id].into_iter().collect()), + CoreState::Scheduled(core) => Some([core.para_id].into_iter().collect()), + CoreState::Occupied(_) | CoreState::Free => None, + }), + } + .unwrap_or_else(|| VecDeque::new()); + + for para_id in paras_now.iter() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -531,7 +532,7 @@ where } } - *group_assignment = GroupAssignments { current: para_now }; + *group_assignment = GroupAssignments { current: paras_now.into_iter().collect() }; Ok(()) } @@ -542,7 +543,7 @@ fn remove_outgoing( ) { let GroupAssignments { current, .. 
} = per_relay_parent.assignment; - if let Some(cur) = current { + for cur in current { if let Entry::Occupied(mut occupied) = current_assignments.entry(cur) { *occupied.get_mut() -= 1; if *occupied.get() == 0 { @@ -857,7 +858,8 @@ async fn process_incoming_peer_message( peer_id = ?origin, ?collator_id, ?para_id, - "Declared as collator for unneeded para", + "Declared as collator for unneeded para. Current assignments: {:?}", + &state.current_assignments ); modify_reputation( @@ -1089,7 +1091,7 @@ where peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?; // Check if this is assigned to us. - if assignment.current.map_or(true, |id| id != collator_para_id) { + if !assignment.current.contains(&collator_para_id) { return Err(AdvertisementError::InvalidAssignment) } @@ -1105,7 +1107,7 @@ where ) .map_err(AdvertisementError::Invalid)?; - if !per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { + if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { return Err(AdvertisementError::SecondedLimitReached) } @@ -1197,7 +1199,7 @@ where }); let collations = &mut per_relay_parent.collations; - if !collations.is_seconded_limit_reached(relay_parent_mode) { + if collations.is_seconded_limit_reached(relay_parent_mode) { gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 3f4459d8e65d1d092957273bb3c0a8b95b6648a8..44e25efd4dfcd30536fca2dc766dc1f18b1851b4 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -21,7 +21,12 @@ use sc_network::ProtocolName; use sp_core::{crypto::Pair, Encode}; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; -use std::{iter, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, VecDeque}, + iter, + sync::Arc, + time::Duration, +}; use polkadot_node_network_protocol::{ our_view, @@ -37,7 +42,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - CandidateReceipt, CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, + CandidateReceipt, CollatorPair, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ @@ -71,6 +76,7 @@ struct TestState { validator_groups: Vec>, group_rotation_info: GroupRotationInfo, cores: Vec, + claim_queue: BTreeMap>, } impl Default for TestState { @@ -104,7 +110,7 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }), CoreState::Free, CoreState::Occupied(OccupiedCore { - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }), occupied_since: 0, time_out_at: 1, next_up_on_time_out: None, @@ -120,6 +126,11 @@ impl Default for TestState { }), ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); + claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect()); + Self { chain_ids, relay_parent, @@ -128,6 +139,7 @@ impl Default for TestState { validator_groups, group_rotation_info, 
cores, + claim_queue, } } } @@ -264,6 +276,26 @@ async fn respond_to_core_info_queries( let _ = tx.send(Ok(test_state.cores.clone())); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Version(tx), + )) => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Assert that the next message is a `CandidateBacking(Second())`. diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 178dcb85e035f05c4136f7d7d7e2433a3713d00c..472731b506ab1c5d59a756a9e80b47016bcceb6c 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -72,6 +72,26 @@ async fn assert_assign_incoming( tx.send(Ok(test_state.cores.clone())).unwrap(); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Version(tx), + )) if parent == hash => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ClaimQueue(tx), + )) if parent == hash => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Handle a view update. 
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 08713209bb740737df26e36d2f4df7933914406b..ccf1b5daad7c3b7f3dd7d5525cb619781c01bce8 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -10,33 +10,33 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -derive_more = "0.99.17" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } -polkadot-primitives = { path = "../../../primitives" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-primitives = { path = "../../primitives" } -sc-network = { path = "../../../../substrate/client/network" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -schnellru = "0.2.1" -indexmap = "2.0.0" +fatality = { workspace = true } +schnellru = { workspace = true } +indexmap = { workspace = true } [dev-dependencies] -async-channel = "1.8.0" -async-trait = "0.1.79" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -futures-timer = "3.0.2" -assert_matches = "1.4.0" -lazy_static = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +async-channel = { workspace = true } +async-trait = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +lazy_static = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 2d6f2f954c667a8a97aff093f1fa734f6afce9aa..83fdc7e26191e6b2568a9733b163a0f0239f6709 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml 
+++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -10,34 +10,34 @@ license.workspace = true workspace = true [dependencies] -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../../substrate/primitives/crypto/hashing" } -sc-network = { path = "../../../../substrate/client/network" } -sc-network-common = { path = "../../../../substrate/client/network/common" } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-primitives = { path = "../../../primitives" } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -futures = "0.3.30" -futures-timer = "3.0.2" -rand = { version = "0.8.5", default-features = false } -rand_chacha = { version = "0.3.1", default-features = false } -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +gum = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } +sp-keyring = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } -assert_matches = "1.4.0" -async-trait = "0.1.79" -parking_lot = "0.12.1" -lazy_static = "1.4.0" -quickcheck = "1.0.3" +assert_matches = { workspace = true } +async-trait = { workspace = true } +parking_lot = { workspace = true, default-features = true } +lazy_static = { workspace = true } +quickcheck = { workspace = true, default-features = true } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 83145ce4013022b5567d45434c989bba732382c7..c9ae23d756cfc4d39d44286b7a567ed46969bf02 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -10,25 +10,25 @@ description = "Primitives types for the Node-side" workspace = true [dependencies] -async-channel = "1.8.0" -async-trait = "0.1.79" -hex = "0.4.3" -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = 
"../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } -sc-network-types = { path = "../../../../substrate/client/network/types" } -sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -strum = { version = "0.26.2", features = ["derive"] } -futures = "0.3.30" +async-channel = { workspace = true } +async-trait = { workspace = true } +hex = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +strum = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } thiserror = { workspace = true } -fatality = "0.1.1" -rand = "0.8" -derive_more = "0.99" -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = "1" +fatality = { workspace = true } +rand = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] -rand_chacha = "0.3.1" +rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index b044acd1a86d372019cd93b1c628601ae43a465d..2a9773ddde4bd316d3133086131399829ffc5626 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -10,39 +10,39 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -sp-staking = { path = "../../../../substrate/primitives/staking", default-features = false } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-network-protocol = { path = "../protocol" } -arrayvec = "0.7.4" -indexmap = "2.0.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-staking = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features 
= true } +arrayvec = { workspace = true } +indexmap = { workspace = true } +codec = { features = ["derive"], workspace = true } thiserror = { workspace = true } -fatality = "0.1.1" -bitvec = "1" +fatality = { workspace = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] -async-channel = "1.8.0" -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sc-network = { path = "../../../../substrate/client/network" } -futures-timer = "3.0.2" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rand_chacha = "0.3" -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +async-channel = { workspace = true } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "statement-distribution-regression-bench" diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index 4d56c795f13b29949a7a3b3e7f9bfc85e89ab274..33431eb1edce585943100e4c23d7234a39779f9c 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -284,7 +284,14 @@ impl StatementDistributionSubsystem { ); }, MuxedMessage::Response(result) => { - v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await; + v2::handle_response( + &mut ctx, + &mut state, + result, + &mut self.reputation, + &self.metrics, + ) + .await; }, MuxedMessage::RetryRequest(()) => { // A pending request is ready to retry. This is only a signal to call @@ -320,7 +327,8 @@ impl StatementDistributionSubsystem { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { let res = - v2::handle_active_leaves_update(ctx, state, activated, mode).await; + v2::handle_active_leaves_update(ctx, state, activated, mode, &metrics) + .await; // Regardless of the result of leaf activation, we always prune before // handling it to avoid leaks. 
v2::handle_deactivate_leaves(state, &deactivated); @@ -370,6 +378,7 @@ impl StatementDistributionSubsystem { relay_parent, statement, &mut self.reputation, + &self.metrics, ) .await?; } @@ -428,11 +437,24 @@ impl StatementDistributionSubsystem { if target.targets_current() { // pass to v2. - v2::handle_network_update(ctx, state, event, &mut self.reputation).await; + v2::handle_network_update( + ctx, + state, + event, + &mut self.reputation, + &self.metrics, + ) + .await; } }, StatementDistributionMessage::Backed(candidate_hash) => { - crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await; + crate::v2::handle_backed_candidate_message( + ctx, + state, + candidate_hash, + &self.metrics, + ) + .await; }, }, } diff --git a/polkadot/node/network/statement-distribution/src/metrics.rs b/polkadot/node/network/statement-distribution/src/metrics.rs index 1bc994174263905d3058154ead29eaaa16bd2ad4..e21fff1e6421e3c90df07c6c1c1237721ce33760 100644 --- a/polkadot/node/network/statement-distribution/src/metrics.rs +++ b/polkadot/node/network/statement-distribution/src/metrics.rs @@ -25,13 +25,13 @@ const HISTOGRAM_LATENCY_BUCKETS: &[f64] = &[ #[derive(Clone)] struct MetricsInner { // V1 - statements_distributed: prometheus::Counter, sent_requests: prometheus::Counter, received_responses: prometheus::CounterVec, network_bridge_update: prometheus::HistogramVec, statements_unexpected: prometheus::CounterVec, created_message_size: prometheus::Gauge, // V1+ + statements_distributed: prometheus::Counter, active_leaves_update: prometheus::Histogram, share: prometheus::Histogram, // V2+ @@ -51,6 +51,13 @@ impl Metrics { } } + /// Update statements distributed counter by an amount + pub fn on_statements_distributed(&self, n: usize) { + if let Some(metrics) = &self.0 { + metrics.statements_distributed.inc_by(n as u64); + } + } + /// Update sent requests counter /// This counter is updated merely for the statements sent via request/response method, /// meaning that it counts large statements only diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 73416b193bbec518fa24e608bdfc17c9de436b50..47d350849b206913b9e493cd9befd6086d0c4e7b 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -195,8 +195,8 @@ struct ActiveValidatorState { index: ValidatorIndex, // our validator group group: GroupIndex, - // the assignment of our validator group, if any. - assignment: Option, + // the assignments of our validator group, if any. + assignments: Vec, // the 'direct-in-group' communication at this relay-parent. 
cluster_tracker: ClusterTracker, } @@ -400,6 +400,7 @@ pub(crate) async fn handle_network_update( state: &mut State, update: NetworkBridgeEvent, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { match update { NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { @@ -483,23 +484,33 @@ pub(crate) async fn handle_network_update( net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::Statement(relay_parent, statement), ) => - handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) - .await, + handle_incoming_statement( + ctx, + state, + peer_id, + relay_parent, + statement, + reputation, + metrics, + ) + .await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) | net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::BackedCandidateManifest(inner), - ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, + ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation, metrics).await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) | net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::BackedCandidateKnown(inner), - ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, + ) => + handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation, metrics) + .await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => - handle_peer_view_update(ctx, state, peer_id, view).await, + handle_peer_view_update(ctx, state, peer_id, view, metrics).await, NetworkBridgeEvent::OurViewChange(_view) => { // handled by `handle_activated_leaf` }, @@ -539,6 +550,7 @@ pub(crate) async fn handle_active_leaves_update( state: &mut State, activated: &ActivatedLeaf, leaf_mode: ProspectiveParachainsMode, + metrics: &Metrics, ) -> JfyiErrorResult<()> { let max_candidate_depth = match leaf_mode { ProspectiveParachainsMode::Disabled => return Ok(()), @@ -714,7 +726,8 @@ pub(crate) async fn handle_active_leaves_update( for (peer, fresh) in update_peers { for fresh_relay_parent in fresh { - send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent).await; + send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent, metrics) + .await; } } } @@ -740,8 +753,8 @@ fn find_active_validator_state( let our_group = groups.by_validator_index(validator_index)?; let core_index = group_rotation_info.core_for_group(our_group, availability_cores.len()); - let para_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { - claim_queue.get_claim_for(core_index, 0) + let paras_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { + claim_queue.iter_claims_for_core(&core_index).copied().collect() } else { availability_cores .get(core_index.0 as usize) @@ -753,6 +766,8 @@ fn find_active_validator_state( .map(|scheduled_core| scheduled_core.para_id), CoreState::Free | CoreState::Occupied(_) => None, }) + .into_iter() + .collect() }; let group_validators = groups.get(our_group)?.to_owned(); @@ -760,7 +775,7 @@ fn find_active_validator_state( active: Some(ActiveValidatorState { index: validator_index, group: our_group, - assignment: para_assigned_to_core, + assignments: paras_assigned_to_core, cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) .expect("group is non-empty because we 
are in it; qed"), }), @@ -813,6 +828,7 @@ async fn handle_peer_view_update( state: &mut State, peer: PeerId, new_view: View, + metrics: &Metrics, ) { let fresh_implicit = { let peer_data = match state.peers.get_mut(&peer) { @@ -824,7 +840,7 @@ async fn handle_peer_view_update( }; for new_relay_parent in fresh_implicit { - send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent).await; + send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent, metrics).await; } } @@ -855,6 +871,7 @@ async fn send_peer_messages_for_relay_parent( state: &mut State, peer: PeerId, relay_parent: Hash, + metrics: &Metrics, ) { let peer_data = match state.peers.get_mut(&peer) { None => return, @@ -887,6 +904,7 @@ async fn send_peer_messages_for_relay_parent( &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, + metrics, ) .await; } @@ -899,6 +917,7 @@ async fn send_peer_messages_for_relay_parent( &per_session_state.groups, relay_parent_state, &state.candidates, + metrics, ) .await; } @@ -947,6 +966,7 @@ async fn send_pending_cluster_statements( cluster_tracker: &mut ClusterTracker, candidates: &Candidates, statement_store: &StatementStore, + metrics: &Metrics, ) { let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); let network_messages = pending_statements @@ -972,12 +992,12 @@ async fn send_pending_cluster_statements( }) .collect::>(); - if network_messages.is_empty() { - return + if !network_messages.is_empty() { + let count = network_messages.len(); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) + .await; + metrics.on_statements_distributed(count); } - - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) - .await; } /// Send a peer all pending grid messages / acknowledgements / follow up statements @@ -991,6 +1011,7 @@ async fn send_pending_grid_messages( groups: &Groups, relay_parent_state: &mut PerRelayParentState, candidates: &Candidates, + metrics: &Metrics, ) { let pending_manifests = { let local_validator = match relay_parent_state.local_validator.as_mut() { @@ -1003,6 +1024,7 @@ async fn send_pending_grid_messages( }; let mut messages: Vec<(Vec, net_protocol::VersionedValidationProtocol)> = Vec::new(); + let mut statements_count = 0; for (candidate_hash, kind) in pending_manifests { let confirmed_candidate = match candidates.get_confirmed(&candidate_hash) { None => continue, // sanity @@ -1077,7 +1099,7 @@ async fn send_pending_grid_messages( }; }, grid::ManifestKind::Acknowledgement => { - messages.extend(acknowledgement_and_statement_messages( + let (m, c) = acknowledgement_and_statement_messages( peer_id, peer_validator_id, groups, @@ -1086,7 +1108,9 @@ async fn send_pending_grid_messages( group_index, candidate_hash, local_knowledge, - )); + ); + messages.extend(m); + statements_count += c; }, } } @@ -1105,8 +1129,9 @@ async fn send_pending_grid_messages( let pending_statements = grid_tracker.all_pending_statements_for(peer_validator_id); - let extra_statements = - pending_statements.into_iter().filter_map(|(originator, compact)| { + let extra_statements = pending_statements + .into_iter() + .filter_map(|(originator, compact)| { let res = pending_statement_network_message( &relay_parent_state.statement_store, relay_parent, @@ -1126,15 +1151,17 @@ async fn send_pending_grid_messages( } res - }); + }) + .collect::>(); + statements_count += extra_statements.len(); messages.extend(extra_statements); } - if messages.is_empty() { - return + if 
!messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + metrics.on_statements_distributed(statements_count); } - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; } // Imports a locally originating statement and distributes it to peers. @@ -1145,6 +1172,7 @@ pub(crate) async fn share_local_statement( relay_parent: Hash, statement: SignedFullStatementWithPVD, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) -> JfyiErrorResult<()> { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { None => return Err(JfyiError::InvalidShare), @@ -1162,10 +1190,10 @@ pub(crate) async fn share_local_statement( None => return Ok(()), }; - let (local_index, local_assignment, local_group) = + let (local_index, local_assignments, local_group) = match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment, l.group), + Some(l) => (l.index, &l.assignments, l.group), }; // Two possibilities: either the statement is `Seconded` or we already @@ -1203,7 +1231,7 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } - if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { + if !local_assignments.contains(&expected_para) || relay_parent != expected_relay_parent { return Err(JfyiError::InvalidShare) } @@ -1267,11 +1295,12 @@ pub(crate) async fn share_local_statement( &state.authorities, &state.peers, compact_statement, + metrics, ) .await; if let Some(post_confirmation) = post_confirmation { - apply_post_confirmation(ctx, state, post_confirmation, reputation).await; + apply_post_confirmation(ctx, state, post_confirmation, reputation, metrics).await; } Ok(()) @@ -1308,6 +1337,7 @@ async fn circulate_statement( authorities: &HashMap, peers: &HashMap, statement: SignedStatement, + metrics: &Metrics, ) { let session_info = &per_session.session_info; @@ -1444,6 +1474,7 @@ async fn circulate_statement( .into(), )) .await; + metrics.on_statement_distributed(); } if !statement_to_v3_peers.is_empty() { @@ -1463,6 +1494,7 @@ async fn circulate_statement( .into(), )) .await; + metrics.on_statement_distributed(); } } /// Check a statement signature under this parent hash. @@ -1509,6 +1541,7 @@ async fn handle_incoming_statement( relay_parent: Hash, statement: UncheckedSignedStatement, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { let peer_state = match state.peers.get(&peer) { None => { @@ -1785,6 +1818,7 @@ async fn handle_incoming_statement( &state.authorities, &state.peers, checked_statement, + metrics, ) .await; } else { @@ -1942,6 +1976,7 @@ async fn provide_candidate_to_grid( per_session: &PerSessionState, authorities: &HashMap, peers: &HashMap, + metrics: &Metrics, ) { let local_validator = match relay_parent_state.local_validator { Some(ref mut v) => v, @@ -2129,8 +2164,10 @@ async fn provide_candidate_to_grid( .await; } if !post_statements.is_empty() { + let count = post_statements.len(); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)) .await; + metrics.on_statements_distributed(count); } } @@ -2144,12 +2181,11 @@ async fn determine_groups_per_para( let n_cores = availability_cores.len(); // Determine the core indices occupied by each para at the current relay parent. To support - // on-demand parachains we also consider the core indices at next block if core has a candidate - // pending availability. 
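The statement-distribution hunks above thread a `&Metrics` handle into every send path and bump a counter either per message sent (`on_statement_distributed`) or by the size of a flushed batch (`on_statements_distributed(count)`). The subsystem's actual `Metrics` type is not part of this diff, so the sketch below only illustrates that counting pattern with the `prometheus` crate; the struct, metric name, and registration are assumptions, not the real implementation.

```rust
// Hypothetical counter in the spirit of `on_statement_distributed` /
// `on_statements_distributed(count)`; names are illustrative only.
use prometheus::{IntCounter, Registry};

#[derive(Clone)]
struct StatementMetrics {
    statements_distributed: IntCounter,
}

impl StatementMetrics {
    fn register(registry: &Registry) -> prometheus::Result<Self> {
        let statements_distributed = IntCounter::new(
            "statement_distribution_statements_sent_total",
            "Number of statements sent to peers by statement distribution.",
        )?;
        registry.register(Box::new(statements_distributed.clone()))?;
        Ok(Self { statements_distributed })
    }

    // A single statement circulated to one set of peers.
    fn on_statement_distributed(&self) {
        self.statements_distributed.inc();
    }

    // A batch of pending statements flushed in one bundle of network messages.
    fn on_statements_distributed(&self, count: usize) {
        self.statements_distributed.inc_by(count as u64);
    }
}

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    let metrics = StatementMetrics::register(&registry)?;
    metrics.on_statement_distributed();
    metrics.on_statements_distributed(3);
    Ok(())
}
```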
- let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + // on-demand parachains we also consider the core indices at next blocks. + let schedule: HashMap> = if let Some(claim_queue) = maybe_claim_queue { claim_queue - .iter_claims_at_depth(0) - .map(|(core_index, para)| (para, core_index)) + .iter_all_claims() + .map(|(core_index, paras)| (*core_index, paras.iter().copied().collect())) .collect() } else { availability_cores @@ -2157,12 +2193,12 @@ async fn determine_groups_per_para( .enumerate() .filter_map(|(index, core)| match core { CoreState::Scheduled(scheduled_core) => - Some((scheduled_core.para_id, CoreIndex(index as u32))), + Some((CoreIndex(index as u32), vec![scheduled_core.para_id])), CoreState::Occupied(occupied_core) => if max_candidate_depth >= 1 { - occupied_core - .next_up_on_available - .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + occupied_core.next_up_on_available.map(|scheduled_core| { + (CoreIndex(index as u32), vec![scheduled_core.para_id]) + }) } else { None }, @@ -2173,9 +2209,12 @@ async fn determine_groups_per_para( let mut groups_per_para = HashMap::new(); // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. - for (para, core_index) in para_core_indices { + for (core_index, paras) in schedule { let group_index = group_rotation_info.group_for_core(core_index, n_cores); - groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + + for para in paras { + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index); + } } groups_per_para @@ -2528,6 +2567,7 @@ async fn handle_incoming_manifest( peer: PeerId, manifest: net_protocol::v2::BackedCandidateManifest, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { gum::debug!( target: LOG_TARGET, @@ -2584,7 +2624,7 @@ async fn handle_incoming_manifest( ) }; - let messages = acknowledgement_and_statement_messages( + let (messages, statements_count) = acknowledgement_and_statement_messages( &( peer, state @@ -2605,6 +2645,7 @@ async fn handle_incoming_manifest( if !messages.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + metrics.on_statements_distributed(statements_count); } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. 
if unconfirmed, add request entry @@ -2632,9 +2673,9 @@ fn acknowledgement_and_statement_messages( group_index: GroupIndex, candidate_hash: CandidateHash, local_knowledge: StatementFilter, -) -> Vec<(Vec, net_protocol::VersionedValidationProtocol)> { +) -> (Vec<(Vec, net_protocol::VersionedValidationProtocol)>, usize) { let local_validator = match relay_parent_state.local_validator.as_mut() { - None => return Vec::new(), + None => return (Vec::new(), 0), Some(l) => l, }; @@ -2662,7 +2703,7 @@ fn acknowledgement_and_statement_messages( "Bug ValidationVersion::V1 should not be used in statement-distribution v2, legacy should have handled this" ); - return Vec::new() + return (Vec::new(), 0) }, }; @@ -2683,10 +2724,11 @@ fn acknowledgement_and_statement_messages( candidate_hash, peer, ); + let statements_count = statement_messages.len(); messages.extend(statement_messages.into_iter().map(|m| (vec![peer.0], m))); - messages + (messages, statements_count) } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2696,6 +2738,7 @@ async fn handle_incoming_acknowledgement( peer: PeerId, acknowledgement: net_protocol::v2::BackedCandidateAcknowledgement, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { // The key difference between acknowledgments and full manifests is that only // the candidate hash is included alongside the bitfields, so the candidate @@ -2776,10 +2819,12 @@ async fn handle_incoming_acknowledgement( ); if !messages.is_empty() { + let count = messages.len(); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( messages.into_iter().map(|m| (vec![peer], m)).collect(), )) .await; + metrics.on_statements_distributed(count); } } @@ -2789,6 +2834,7 @@ pub(crate) async fn handle_backed_candidate_message( ctx: &mut Context, state: &mut State, candidate_hash: CandidateHash, + metrics: &Metrics, ) { // If the candidate is unknown or unconfirmed, it's a race (pruned before receiving message) // or a bug. Ignore if so @@ -2830,6 +2876,7 @@ pub(crate) async fn handle_backed_candidate_message( per_session, &state.authorities, &state.peers, + metrics, ) .await; @@ -2851,6 +2898,7 @@ async fn send_cluster_candidate_statements( state: &mut State, candidate_hash: CandidateHash, relay_parent: Hash, + metrics: &Metrics, ) { let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { None => return, @@ -2893,6 +2941,7 @@ async fn send_cluster_candidate_statements( &state.authorities, &state.peers, statement, + metrics, ) .await; } @@ -2910,6 +2959,7 @@ async fn apply_post_confirmation( state: &mut State, post_confirmation: PostConfirmation, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { for peer in post_confirmation.reckoning.incorrect { modify_reputation(reputation, ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; @@ -2923,6 +2973,7 @@ async fn apply_post_confirmation( state, candidate_hash, post_confirmation.hypothetical.relay_parent(), + metrics, ) .await; new_confirmed_candidate_fragment_chain_updates(ctx, state, post_confirmation.hypothetical) @@ -3048,6 +3099,7 @@ pub(crate) async fn handle_response( state: &mut State, response: UnhandledResponse, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); @@ -3147,7 +3199,7 @@ pub(crate) async fn handle_response( }; // Note that this implicitly circulates all statements via the cluster. 
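The reworked `determine_groups_per_para` shown earlier no longer takes a single claim per core: it flattens the whole claim queue into a `CoreIndex -> Vec<ParaId>` schedule and then inverts it into `ParaId -> Vec<GroupIndex>` via the group rotation. The stand-alone sketch below mirrors that inversion with plain newtypes; the rotation rule is a simplified stand-in for `GroupRotationInfo::group_for_core`, not the real logic.

```rust
// Illustrative sketch of the core-schedule inversion: a claim queue keyed by
// core is turned into "which validator groups may back this para".
use std::collections::{BTreeMap, HashMap, VecDeque};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct CoreIndex(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ParaId(u32);
#[derive(Clone, Copy, Debug)]
struct GroupIndex(u32);

fn groups_per_para(
    claim_queue: &BTreeMap<CoreIndex, VecDeque<ParaId>>,
    n_cores: usize,
    rotation_offset: u32,
) -> HashMap<ParaId, Vec<GroupIndex>> {
    let mut out: HashMap<ParaId, Vec<GroupIndex>> = HashMap::new();
    for (core, paras) in claim_queue {
        // Stand-in for `group_rotation_info.group_for_core(core_index, n_cores)`.
        let group = GroupIndex((core.0 + rotation_offset) % n_cores as u32);
        for para in paras {
            out.entry(*para).or_default().push(group);
        }
    }
    out
}

fn main() {
    let mut queue = BTreeMap::new();
    queue.insert(CoreIndex(0), VecDeque::from([ParaId(2000), ParaId(2001)]));
    queue.insert(CoreIndex(1), VecDeque::from([ParaId(2000)]));
    // Para 2000 is claimed on two cores, so it ends up with two backing groups,
    // which is what the `Vec<GroupIndex>` per para captures.
    println!("{:?}", groups_per_para(&queue, 2, 0));
}
```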
- apply_post_confirmation(ctx, state, post_confirmation, reputation).await; + apply_post_confirmation(ctx, state, post_confirmation, reputation, metrics).await; let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index e77cead4a7565de9afd71d484c57e9fddc4e2d00..2253a5ae0c668c0c107f19ad15ef933c1cf17abc 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -10,30 +10,30 @@ description = "System overseer of the Polkadot node" workspace = true [dependencies] -sc-client-api = { path = "../../../substrate/client/api" } -sp-api = { path = "../../../substrate/primitives/api" } -futures = "0.3.30" -futures-timer = "3.0.2" -parking_lot = "0.12.1" -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-metrics = { path = "../metrics" } -polkadot-primitives = { path = "../../primitives" } -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -gum = { package = "tracing-gum", path = "../gum" } -sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.79" -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +gum = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +async-trait = { workspace = true } +tikv-jemalloc-ctl = { optional = true, workspace = true } [dev-dependencies] -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } -sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.30", features = ["thread-pool"] } -femme = "2.2.1" -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } +metered = { features = ["futures_channel"], workspace = true } +sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +femme = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 177e3addf368d715d51c5bcd7d7437515d56cd5b..8e78d8fc8921a8bd26686e295b584c4e1fff6433 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -813,7 +813,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn 
test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) + CandidateBackingMessage::GetBackableCandidates(Default::default(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 0a84e5dae2a589ef5cd165ce2825bf811751af94..cd642bf16ff9bf7d6ffeeea2ce0dac752128bd3c 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -10,24 +10,24 @@ license.workspace = true workspace = true [dependencies] -bounded-vec = "0.7" -futures = "0.3.30" -polkadot-primitives = { path = "../../primitives" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-core = { path = "../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -schnorrkel = "0.11.4" +bounded-vec = { workspace = true } +futures = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +schnorrkel = { workspace = true, default-features = true } thiserror = { workspace = true } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { features = ["alloc"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } [dev-dependencies] -polkadot-erasure-coding = { path = "../../erasure-coding" } +polkadot-erasure-coding = { workspace = true, default-features = true } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index aded1b8fe7342a9157720acda75a8d7a7f835334..660b504e97fbb1bc11c1bfd57011931157e4770d 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.12.0"; +pub const NODE_VERSION: &'static str = "1.14.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index ec5113d2c8a5f0bc40e404dbf78201c5b2a65102..c0ddbf7dcfc36d89591567a88075c2344b025b04 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -12,147 +12,147 @@ workspace = true [dependencies] # Substrate Client -sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } -sc-consensus-babe = { path = "../../../substrate/client/consensus/babe" } -sc-consensus-beefy = { path = "../../../substrate/client/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } -mmr-gadget = { path = "../../../substrate/client/merkle-mountain-range" } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range" } -sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-client-db = { path = "../../../substrate/client/db" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-slots = { path = "../../../substrate/client/consensus/slots" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-common = { path = "../../../substrate/client/network/common" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-sync-state-rpc = { path = "../../../substrate/client/sync-state-rpc" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sc-service = { path = "../../../substrate/client/service", default-features = false } -sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-authority-discovery = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +mmr-gadget = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-sync-state-rpc = 
{ workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-telemetry = { workspace = true, default-features = true } # Substrate Primitives -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-offchain = { package = "sp-offchain", path = "../../../substrate/primitives/offchain" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-session = { path = "../../../substrate/primitives/session" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-weights = { path = "../../../substrate/primitives/weights" } -sp-version = { path = "../../../substrate/primitives/version" } +sp-authority-discovery = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Substrate Pallets 
-pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-staking = { path = "../../../substrate/frame/staking" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", optional = true } -frame-system = { path = "../../../substrate/frame/system" } +pallet-babe = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +frame-metadata-hash-extension = { optional = true, workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } # Substrate Other -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -frame-support = { path = "../../../substrate/frame/support" } -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } # External Crates -async-trait = "0.1.79" -futures = "0.3.30" -hex-literal = "0.4.1" -is_executable = "1.0.1" -gum = { package = "tracing-gum", path = "../gum" } +async-trait = { workspace = true } +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } +is_executable = { workspace = true } +gum = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -schnellru = "0.2.1" +schnellru = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -kvdb = "0.13.0" -kvdb-rocksdb = { version = "0.19.0", optional = true } -parity-db = { version = "0.4.12", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -parking_lot = "0.12.1" -bitvec = { version = "1.0.1", optional = true } +kvdb = { workspace = true } +kvdb-rocksdb = { optional = true, workspace = true } +parity-db = { optional = true, workspace = true } +codec = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +bitvec = { optional = true, workspace = true, default-features = true } # Polkadot -polkadot-core-primitives = { path = "../../core-primitives" } -polkadot-node-core-parachains-inherent = { path = "../core/parachains-inherent" } -polkadot-overseer = { path = "../overseer" } -polkadot-parachain-primitives = { path = "../../parachain" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-rpc = { path = "../../rpc" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -polkadot-node-network-protocol = { path = 
"../network/protocol" } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-node-core-parachains-inherent = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-rpc = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } # Polkadot Runtime Constants -rococo-runtime-constants = { path = "../../runtime/rococo/constants", optional = true } -westend-runtime-constants = { path = "../../runtime/westend/constants", optional = true } +rococo-runtime-constants = { optional = true, workspace = true, default-features = true } +westend-runtime-constants = { optional = true, workspace = true, default-features = true } # Polkadot Runtimes -westend-runtime = { path = "../../runtime/westend", optional = true } -rococo-runtime = { path = "../../runtime/rococo", optional = true } +westend-runtime = { optional = true, workspace = true } +rococo-runtime = { optional = true, workspace = true } # Polkadot Subsystems -polkadot-approval-distribution = { path = "../network/approval-distribution", optional = true } -polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution", optional = true } -polkadot-availability-distribution = { path = "../network/availability-distribution", optional = true } -polkadot-availability-recovery = { path = "../network/availability-recovery", optional = true } -polkadot-collator-protocol = { path = "../network/collator-protocol", optional = true } -polkadot-dispute-distribution = { path = "../network/dispute-distribution", optional = true } -polkadot-gossip-support = { path = "../network/gossip-support", optional = true } -polkadot-network-bridge = { path = "../network/bridge", optional = true } -polkadot-node-collation-generation = { path = "../collation-generation", optional = true } -polkadot-node-core-approval-voting = { path = "../core/approval-voting", optional = true } -polkadot-node-core-av-store = { path = "../core/av-store", optional = true } -polkadot-node-core-backing = { path = "../core/backing", optional = true } -polkadot-node-core-bitfield-signing = { path = "../core/bitfield-signing", optional = true } -polkadot-node-core-candidate-validation = { path = "../core/candidate-validation", optional = true } -polkadot-node-core-chain-api = { path = "../core/chain-api", optional = true } -polkadot-node-core-chain-selection = { path = "../core/chain-selection", optional = true } -polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator", optional = true } -polkadot-node-core-prospective-parachains = { path = "../core/prospective-parachains", optional = true } -polkadot-node-core-provisioner = { path = "../core/provisioner", optional = true } -polkadot-node-core-pvf = { path = "../core/pvf", optional = true } -polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true } -polkadot-node-core-runtime-api = { path = 
"../core/runtime-api", optional = true } -polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true } - -xcm = { package = "staging-xcm", path = "../../xcm" } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } +polkadot-approval-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-bitfield-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-recovery = { optional = true, workspace = true, default-features = true } +polkadot-collator-protocol = { optional = true, workspace = true, default-features = true } +polkadot-dispute-distribution = { optional = true, workspace = true, default-features = true } +polkadot-gossip-support = { optional = true, workspace = true, default-features = true } +polkadot-network-bridge = { optional = true, workspace = true, default-features = true } +polkadot-node-collation-generation = { optional = true, workspace = true, default-features = true } +polkadot-node-core-approval-voting = { optional = true, workspace = true, default-features = true } +polkadot-node-core-av-store = { optional = true, workspace = true, default-features = true } +polkadot-node-core-backing = { optional = true, workspace = true, default-features = true } +polkadot-node-core-bitfield-signing = { optional = true, workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { optional = true, workspace = true, default-features = true } +polkadot-node-core-chain-api = { optional = true, workspace = true, default-features = true } +polkadot-node-core-chain-selection = { optional = true, workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { optional = true, workspace = true, default-features = true } +polkadot-node-core-prospective-parachains = { optional = true, workspace = true, default-features = true } +polkadot-node-core-provisioner = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-checker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-runtime-api = { optional = true, workspace = true, default-features = true } +polkadot-statement-distribution = { optional = true, workspace = true, default-features = true } + +xcm = { workspace = true, default-features = true } +xcm-runtime-apis = { workspace = true, default-features = true } [dev-dependencies] -polkadot-test-client = { path = "../test/client" } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -env_logger = "0.11" -assert_matches = "1.5.0" -serial_test = "2.0.0" -tempfile = "3.2" +polkadot-test-client = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +env_logger = { workspace = true } +assert_matches = { workspace = true } +serial_test = { workspace = true } +tempfile = { workspace = true } [features] default = ["db", "full-node"] @@ -201,6 +201,13 @@ rococo-native = [ "rococo-runtime-constants", ] +# Generate the metadata hash needed for CheckMetadataHash +# in the test runtimes. 
+metadata-hash = [ + "rococo-runtime?/metadata-hash", + "westend-runtime?/metadata-hash", +] + runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", @@ -217,7 +224,7 @@ runtime-benchmarks = [ "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index dd8a0a7e635bc67e298f31f537f9c41fa207b6a5..e971830c95cb2de51b6d5cf25a154f8a9a701deb 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -241,7 +241,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( _: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -398,30 +398,30 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { - fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_fee_payment_runtime_api::fees::Error> { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_runtime_apis::fees::Error> { unimplemented!() } - fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { + fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { unimplemented!() } - fn query_xcm_weight(_: VersionedXcm<()>) -> Result { + fn query_xcm_weight(_: VersionedXcm<()>) -> Result { unimplemented!() } - fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { + fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { unimplemented!() } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { - fn dry_run_call(_: (), _: ()) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { + fn dry_run_call(_: (), _: ()) -> Result, xcm_runtime_apis::dry_run::Error> { unimplemented!() } - fn dry_run_xcm(_: VersionedLocation, _: VersionedXcm<()>) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + fn dry_run_xcm(_: VersionedLocation, _: VersionedXcm<()>) -> Result, xcm_runtime_apis::dry_run::Error> { unimplemented!() } } diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 5001104f929a2dc2c596567c8b7febd8088e09f2..0325613d25f9a716f9a0189e8bb9db91d9051e8c 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -20,76 +20,76 @@ path = "src/cli/subsystem-bench.rs" doc = false [dependencies] -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-availability-recovery = { path = "../network/availability-recovery", features = ["subsystem-benchmarks"] } -polkadot-availability-distribution = { path = "../network/availability-distribution" } -polkadot-statement-distribution = { path = "../network/statement-distribution" } -polkadot-node-core-av-store = { path = 
"../core/av-store" } -polkadot-node-core-chain-api = { path = "../core/chain-api" } -polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution" } -color-eyre = { version = "0.6.1", default-features = false } -polkadot-overseer = { path = "../overseer" } -colored = "2.0.4" -assert_matches = "1.5" -async-trait = "0.1.79" -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -bincode = "1.3.3" -sha1 = "0.10.6" -hex = "0.4.3" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } +polkadot-availability-distribution = { workspace = true, default-features = true } +polkadot-statement-distribution = { workspace = true, default-features = true } +polkadot-node-core-av-store = { workspace = true, default-features = true } +polkadot-node-core-chain-api = { workspace = true, default-features = true } +polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } +color-eyre = { workspace = true } +polkadot-overseer = { workspace = true, default-features = true } +colored = { workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +bincode = { workspace = true } +sha1 = { workspace = true } +hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -env_logger = "0.11" -rand = "0.8.5" +env_logger = { workspace = true } +rand = { workspace = true, default-features = true } # `rand` only supports uniform distribution, we need normal distribution for latency. 
-rand_distr = "0.4.3" -bitvec = "1.0.1" -kvdb-memorydb = "0.13.0" +rand_distr = { workspace = true } +bitvec = { workspace = true, default-features = true } +kvdb-memorydb = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive", "std"] } -tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } -clap-num = "1.0.2" -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sc-service = { path = "../../../substrate/client/service" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -polkadot-node-metrics = { path = "../metrics" } -itertools = "0.11" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -prometheus = { version = "0.13.0", default-features = false } +codec = { features = ["derive", "std"], workspace = true, default-features = true } +tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } +clap-num = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +itertools = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +prometheus = { workspace = true } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } serde_json = { workspace = true } -polkadot-node-core-approval-voting = { path = "../core/approval-voting" } -polkadot-approval-distribution = { path = "../network/approval-distribution" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-approval-distribution = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-timestamp = { workspace = true, default-features = true } -schnorrkel = { version = "0.11.4", default-features = false } +schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = { version = "0.3.1" } -paste = "1.0.14" -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -pyroscope = { version = "0.5.7" } -pyroscope_pprofrs = "0.2.7" -strum = { version = "0.24", features = ["derive"] } +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true 
} +orchestra = { features = ["futures_channel"], workspace = true } +pyroscope = { workspace = true } +pyroscope_pprofrs = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index 146da57d44c4aaf973e13c886a357028cdbe3559..cae1a30914da78c32a54ad035b8870f3a4412548 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -16,3 +16,5 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 6b17e62c20aa3f69153fb596d1a303a2e0320ddd..7edb48e302a46b464e86d5b3535ab71d1585a408 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -16,3 +16,5 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index e946c28e8ef5d4e38736ffc21e56d8b1c6cd0ddc..7c24f50e6af5531ab652e838f362733f0fda6707 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -16,3 +16,6 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null + diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml index 8f4b050e72f27dd4b5bb0c52bd49162cd0bb83ec..fe2402faeccdc11f82ef10d7ff8618c8cbbd30c8 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml @@ -16,3 +16,6 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null + diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 5c0c65b11cdb5db521ba0045b83f937f72e6b246..4ac044ea3459a225536069f0057c78b7075bc463 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -60,7 +60,7 @@ use polkadot_node_subsystem_util::metrics::Metrics; use polkadot_overseer::Handle as OverseerHandleReal; use polkadot_primitives::{ BlockNumber, CandidateEvent, CandidateIndex, CandidateReceipt, Hash, Header, Slot, - ValidatorIndex, + ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use prometheus::Registry; use sc_keystore::LocalKeystore; @@ -68,6 +68,7 @@ use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; +use sp_keystore::Keystore; use std::{ cmp::max, collections::{HashMap, HashSet}, @@ -697,12 +698,12 @@ impl PeerMessageProducer { .expect("We can't handle unknown peers") .clone(); - self.network - .send_message_from_peer( - 
&peer_authority_id, - protocol_v3::ValidationProtocol::ApprovalDistribution(message.msg).into(), - ) - .unwrap_or_else(|_| panic!("Network should be up and running {:?}", sent_by)); + if let Err(err) = self.network.send_message_from_peer( + &peer_authority_id, + protocol_v3::ValidationProtocol::ApprovalDistribution(message.msg).into(), + ) { + gum::warn!(target: LOG_TARGET, ?sent_by, ?err, "Validator can not send message"); + } } // Queues a message to be sent by the peer identified by the `sent_by` value. @@ -785,6 +786,12 @@ fn build_overseer( let db: polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); let keystore = LocalKeystore::in_memory(); + keystore + .sr25519_generate_new( + ASSIGNMENT_KEY_TYPE_ID, + Some(state.test_authorities.key_seeds.get(NODE_UNDER_TEST as usize).unwrap().as_str()), + ) + .unwrap(); let system_clock = PastSystemClock::new(SystemClock {}, state.delta_tick_from_generated.clone()); @@ -987,11 +994,12 @@ pub async fn bench_approvals_run( "polkadot_parachain_subsystem_bounded_received", Some(("subsystem_name", "approval-distribution-subsystem")), |value| { - gum::info!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); + gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 }, ) .await; + gum::info!("Requesting approval votes ms"); for info in &state.blocks { @@ -1031,7 +1039,7 @@ pub async fn bench_approvals_run( "polkadot_parachain_subsystem_bounded_received", Some(("subsystem_name", "approval-distribution-subsystem")), |value| { - gum::info!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); + gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 }, ) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index be9dbd55cb6f9898a879e7bbaa6c5f5b9c303484..ee45ea05c925a433394de3e3e5e2c315dac0aff2 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -28,7 +28,7 @@ use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ node_features, AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, GroupRotationInfo, IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, - SessionInfo, ValidatorIndex, + SessionInfo, ValidationCode, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; @@ -288,6 +288,15 @@ impl MockRuntimeApi { }; tx.send(Ok((groups, group_rotation_info))).unwrap(); }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + ) => { + let validation_code = ValidationCode(Vec::new()); + if let Err(err) = tx.send(Ok(Some(validation_code))) { + gum::error!(target: LOG_TARGET, ?err, "validation code wasn't received"); + } + }, // Long term TODO: implement more as needed. 
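The mock runtime API above answers `ValidationCodeByHash` with an empty `ValidationCode` and logs instead of panicking when the requester has gone away. A simplified, stand-alone sketch of that oneshot reply pattern follows; plain types and `eprintln!` stand in for the real `RuntimeApiMessage` plumbing and `gum` logging.

```rust
// Answer a request over a oneshot channel; log rather than panic if the
// receiver was dropped. Types are simplified stand-ins.
use futures::channel::oneshot;

#[derive(Debug, Clone, PartialEq)]
struct ValidationCode(Vec<u8>);

fn answer_validation_code_by_hash(tx: oneshot::Sender<Option<ValidationCode>>) {
    // The benchmark mock does not need real code; an empty blob is enough.
    let code = ValidationCode(Vec::new());
    if let Err(unsent) = tx.send(Some(code)) {
        // `send` hands the value back if the receiver is gone.
        eprintln!("validation code wasn't received: {:?}", unsent);
    }
}

fn main() {
    let (tx, rx) = oneshot::channel();
    answer_validation_code_by_hash(tx);
    assert_eq!(
        futures::executor::block_on(rx).unwrap(),
        Some(ValidationCode(Vec::new()))
    );
}
```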
message => { unimplemented!("Unexpected runtime-api message: {:?}", message) diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index 57678e8e8d4a1057ec843f097c2cd9811f33a6bb..d3229291673c6da0b1ba12d6983c678ed2d5a343 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -11,19 +11,19 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -parking_lot = "0.12.1" -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } +async-trait = { workspace = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } -sc-client-api = { path = "../../../substrate/client/api" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } +sc-client-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 0178b193cba8c88f2cc7b4a980beae8f7de106ff..c8fc324699e1754cb0d476a97ef29c8fea4ede74 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -10,26 +10,26 @@ license.workspace = true workspace = true [dependencies] -derive_more = "0.99.17" -fatality = "0.1.1" -futures = "0.3.30" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-statement-table = { path = "../../statement-table" } -polkadot-node-jaeger = { path = "../jaeger" } -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sc-client-api = { path = 
"../../../substrate/client/api" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -smallvec = "1.8.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } +futures = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-statement-table = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } thiserror = { workspace = true } -async-trait = "0.1.79" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +async-trait = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 722a97989bce0b8587c8992827f6438fd78ff21e..ee937bca05bfedbcf3e25103ebdf9c765b967401 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -85,7 +85,7 @@ pub enum CandidateBackingMessage { /// candidates of the same para that follow it in the input vector. In other words, assuming /// candidates are supplied in dependency order, we must ensure that this dependency order is /// preserved. 
- GetBackedCandidates( + GetBackableCandidates( HashMap>, oneshot::Sender>>, ), diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index b7fb75b94b2c726cb0171b48b023a9d9517970ac..98ea21f250eda7874f03ba1aeffd482fedbc8808 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -10,47 +10,47 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -futures-channel = "0.3.23" -itertools = "0.11" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -parking_lot = "0.12.1" -pin-project = "1.0.9" -rand = "0.8.5" +async-trait = { workspace = true } +futures = { workspace = true } +futures-channel = { workspace = true } +itertools = { workspace = true } +codec = { features = ["derive"], workspace = true } +parking_lot = { workspace = true, default-features = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -gum = { package = "tracing-gum", path = "../gum" } -derive_more = "0.99.17" -schnellru = "0.2.1" +fatality = { workspace = true } +gum = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +schnellru = { workspace = true } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-jaeger = { path = "../jaeger" } -polkadot-node-metrics = { path = "../metrics" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-overseer = { path = "../overseer" } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +metered = { features = ["futures_channel"], workspace = true } -sp-core = { path = "../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-client-api = { path = "../../../substrate/client/api" } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } -kvdb = "0.13.0" -parity-db = { version = "0.4.12" } +kvdb = { workspace = true } +parity-db = { workspace = true } [dev-dependencies] -assert_matches = "1.4.0" -env_logger = "0.11" -futures = { version = "0.3.30", features = ["thread-pool"] } +assert_matches = { workspace = true } +env_logger 
= { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -lazy_static = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -kvdb-shared-tests = "0.11.0" -tempfile = "3.1.0" -kvdb-memorydb = "0.13.0" +polkadot-node-subsystem-test-helpers = { workspace = true } +lazy_static = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +kvdb-shared-tests = { workspace = true } +tempfile = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index b166a54f75c46ebfe4dd8d06af32a3dea3495b7a..b6cd73f412b33a10c814be9813e0cd14bd843934 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -31,7 +31,7 @@ const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; /// A snapshot of the runtime claim queue at an arbitrary relay chain block. #[derive(Default)] -pub struct ClaimQueueSnapshot(BTreeMap>); +pub struct ClaimQueueSnapshot(pub BTreeMap>); impl From>> for ClaimQueueSnapshot { fn from(claim_queue_snapshot: BTreeMap>) -> Self { @@ -56,6 +56,19 @@ impl ClaimQueueSnapshot { .iter() .filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?))) } + + /// Returns an iterator over all claims on the given core. + pub fn iter_claims_for_core( + &self, + core_index: &CoreIndex, + ) -> impl Iterator + '_ { + self.0.get(core_index).map(|c| c.iter()).into_iter().flatten() + } + + /// Returns an iterator over the whole claim queue. + pub fn iter_all_claims(&self) -> impl Iterator)> + '_ { + self.0.iter() + } } // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index c59c1f88e33995aef7578da58e28d086668f14ee..8edfea9e26bf5d2176b672c9d24bb1e0251b33ae 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -10,6 +10,6 @@ license.workspace = true workspace = true [dependencies] -polkadot-overseer = { path = "../overseer" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-jaeger = { path = "../jaeger" } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 0b49866ee2aec4f9676241a50271a16657b221e0..587af659fbd2dfeb142c4f307ce71a92b0fae35b 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -10,35 +10,35 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Polkadot dependencies -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -polkadot-test-service = { path = "../service" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-test-runtime = { workspace = true } +polkadot-test-service = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { 
workspace = true, default-features = true } # Substrate dependencies -substrate-test-client = { path = "../../../../substrate/test-utils/client" } -sc-service = { path = "../../../../substrate/client/service" } -sc-block-builder = { path = "../../../../substrate/client/block-builder" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-offchain = { path = "../../../../substrate/client/offchain" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -sp-io = { path = "../../../../substrate/primitives/io" } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } +substrate-test-client = { workspace = true } +sc-service = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = "0.3.30" +sp-keyring = { workspace = true, default-features = true } +futures = { workspace = true } [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 3fc6d060870b1c36940cd7b75e50cddcf55782d3..8eb6105f98e2571bab0257694ccac49ae47c2ca9 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -10,60 +10,60 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -hex = "0.4.3" -gum = { package = "tracing-gum", path = "../../gum" } -rand = "0.8.5" +futures = { workspace = true } +hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tempfile = "3.2.0" -tokio = "1.37" +tempfile = { workspace = true } +tokio = { workspace = true, default-features = true } # Polkadot dependencies -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-rpc = { path = "../../../rpc" } -polkadot-runtime-common = { path = "../../../runtime/common" } 
-polkadot-service = { path = "../../service" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -test-runtime-constants = { path = "../../../runtime/test-runtime/constants" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-rpc = { workspace = true, default-features = true } +polkadot-runtime-common = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-test-runtime = { workspace = true } +test-runtime-constants = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } # Substrate dependencies -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -frame-system = { path = "../../../../substrate/frame/system" } -sc-consensus-grandpa = { path = "../../../../substrate/client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../../substrate/primitives/consensus/grandpa" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -pallet-staking = { path = "../../../../substrate/frame/staking" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } -sc-chain-spec = { path = "../../../../substrate/client/chain-spec" } -sc-cli = { path = "../../../../substrate/client/cli" } -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-network = { path = "../../../../substrate/client/network" } -sc-tracing = { path = "../../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../../substrate/client/transaction-pool" } -sc-service = { path = "../../../../substrate/client/service", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -substrate-test-client = { path = "../../../../substrate/test-utils/client" } +sp-authority-discovery = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +frame-system = { workspace = 
true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-service = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -substrate-test-utils = { path = "../../../../substrate/test-utils" } -tokio = { version = "1.37", features = ["macros"] } +pallet-balances = { workspace = true } +substrate-test-utils = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } [features] runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 31662ccfc464913989e479a12e4bd976451498dc..a9bf1f5ef093a905de7ca96c515a5a1b2c9cd17b 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -12,14 +12,14 @@ license.workspace = true workspace = true [dependencies] -tokio = { version = "1.24.2", default-features = false, features = ["macros", "net", "rt-multi-thread", "sync"] } -url = "2.3.1" -tokio-tungstenite = "0.20.1" -futures-util = "0.3.30" -lazy_static = "1.4.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } +tokio = { features = ["macros", "net", "rt-multi-thread", "sync"], workspace = true } +url = { workspace = true } +tokio-tungstenite = { workspace = true } +futures-util = { workspace = true, default-features = true } +lazy_static = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +reqwest = { features = ["rustls-tls"], workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../gum" } +gum = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 11e8e3ce6d843cf410ce4a2c0bfb2ceacde9ce3e..1491af0148e0b16da53078935b802ae3287f6c36 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -13,15 +13,15 @@ workspace = true # note: special care is taken to avoid 
inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-std = { path = "../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-weights = { path = "../../substrate/primitives/weights", default-features = false } -polkadot-core-primitives = { path = "../core-primitives", default-features = false } -derive_more = "0.99.11" -bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-std = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-weights = { workspace = true } +polkadot-core-primitives = { workspace = true } +derive_more = { workspace = true, default-features = true } +bounded-collections = { features = ["serde"], workspace = true } # all optional crates. serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index c58b11a11b01f9fb16d80fbe836cdcc956a84890..9f35653f957f3ba8423c5a1e47ca08a05d6bf6c5 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -11,14 +11,14 @@ publish = false workspace = true [dependencies] -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +tiny-keccak = { features = ["keccak"], workspace = true } +codec = { features = ["derive"], workspace = true } -test-parachain-adder = { path = "adder" } -test-parachain-halt = { path = "halt" } +test-parachain-adder = { workspace = true } +test-parachain-halt = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index e0bbe177eedced788de07dc5bf753b4a2f9bb66e..1661112a7b3263caa55a9ced38b08f33ac6e3d2f 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -12,17 +12,17 @@ publish = false workspace = true [dependencies] -polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = ["global"] } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +tiny-keccak = { features = 
["keccak"], workspace = true } +dlmalloc = { features = ["global"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } +sp-io = { features = ["disable_allocator"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 996735e8c8bf8e641de15acf4bb2bd2e126c94fe..061378a76a82eb8edcf8823d7ee3d43d7d3fc7e9 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +codec = { features = ["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -test-parachain-adder = { path = ".." } -polkadot-primitives = { path = "../../../../primitives" } -polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } -polkadot-node-primitives = { path = "../../../../node/primitives" } -polkadot-node-subsystem = { path = "../../../../node/subsystem" } +test-parachain-adder = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } -sc-cli = { path = "../../../../../substrate/client/cli" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sc-service = { path = "../../../../../substrate/client/service" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-parachain-primitives = { path = "../../.." 
} -polkadot-test-service = { path = "../../../../node/test/service" } -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { path = "../../../../../substrate/test-utils" } -sc-service = { path = "../../../../../substrate/client/service" } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index 1bdd4392ad313dbdcf62d36bd04cab7330fdf3fb..f8272f6ed19681adf3faa57dfac3c4f126db6839 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -14,8 +14,8 @@ workspace = true [dependencies] [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } -rustversion = "1.0.6" +substrate-wasm-builder = { workspace = true, default-features = true } +rustversion = { workspace = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 4d3d2abaeafed85dea9a415da47b620981f1e5ca..2466c6a0d69d766f53aafab6c59ae35915570f23 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -12,18 +12,18 @@ license.workspace = true workspace = true [dependencies] -polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = ["global"] } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } +dlmalloc = { features = ["global"], workspace = true } log = { workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } +sp-io = { features = ["disable_allocator"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 288549c2c268ab93a3b39d5bc2e184a28b7dc158..5760258c70ea5757f5fda90abbfe5a41e5ee0fb4 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,30 
+15,30 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +codec = { features = ["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -test-parachain-undying = { path = ".." } -polkadot-primitives = { path = "../../../../primitives" } -polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } -polkadot-node-primitives = { path = "../../../../node/primitives" } -polkadot-node-subsystem = { path = "../../../../node/subsystem" } +test-parachain-undying = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } -sc-cli = { path = "../../../../../substrate/client/cli" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sc-service = { path = "../../../../../substrate/client/service" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-parachain-primitives = { path = "../../.." } -polkadot-test-service = { path = "../../../../node/test/service" } -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { path = "../../../../../substrate/test-utils" } -sc-service = { path = "../../../../../substrate/client/service" } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index d6df077b88b771ce556db3cc056745a5254a6bc5..c0b510a8fe9d0118fba52f7c8869210c3121969c 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -10,28 +10,28 @@ description = "Shared primitives used by Polkadot runtime" workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } -hex-literal = "0.4.1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } -log = { workspace = true, default-features = false } +bitvec = { features = ["alloc", "serde"], workspace = true } +hex-literal = { workspace = true, default-features = true } +codec = { features = ["bit-vec", "derive"], 
workspace = true } +scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } +log = { workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -sp-application-crypto = { path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } -sp-inherents = { path = "../../substrate/primitives/inherents", default-features = false } -sp-core = { path = "../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -sp-api = { path = "../../substrate/primitives/api", default-features = false } -sp-arithmetic = { path = "../../substrate/primitives/arithmetic", default-features = false, features = ["serde"] } -sp-authority-discovery = { path = "../../substrate/primitives/authority-discovery", default-features = false, features = ["serde"] } -sp-consensus-slots = { path = "../../substrate/primitives/consensus/slots", default-features = false, features = ["serde"] } -sp-io = { path = "../../substrate/primitives/io", default-features = false } -sp-keystore = { path = "../../substrate/primitives/keystore", optional = true, default-features = false } -sp-staking = { path = "../../substrate/primitives/staking", default-features = false, features = ["serde"] } -sp-std = { package = "sp-std", path = "../../substrate/primitives/std", default-features = false } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-inherents = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { features = ["serde"], workspace = true } +sp-authority-discovery = { features = ["serde"], workspace = true } +sp-consensus-slots = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } -polkadot-core-primitives = { path = "../core-primitives", default-features = false } -polkadot-parachain-primitives = { path = "../parachain", default-features = false } +polkadot-core-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } [features] default = ["std"] diff --git a/polkadot/primitives/test-helpers/Cargo.toml b/polkadot/primitives/test-helpers/Cargo.toml index fab9480cfdeb9876c2556ae78a690775bf16d7a8..a44996ad6ef2dfdbeeea6c75fadb397d500eafec 100644 --- a/polkadot/primitives/test-helpers/Cargo.toml +++ b/polkadot/primitives/test-helpers/Cargo.toml @@ -10,9 +10,9 @@ license.workspace = true workspace = true [dependencies] -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-application-crypto = { package = "sp-application-crypto", path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-core = { path = "../../../substrate/primitives/core", features = ["std"] } -polkadot-primitives = { path = ".." 
} -rand = "0.8.5" +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true } +sp-runtime = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/polkadot/roadmap/implementers-guide/src/node/backing/candidate-backing.md index 31f8423fe27b275452b84bc8d2486e10b81e1908..0e483f02ec312ae3649b158735dd5872b56eadf7 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/candidate-backing.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -1,5 +1,9 @@ # Candidate Backing +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The Candidate Backing subsystem ensures every parablock considered for relay block inclusion has been seconded by at least one validator, and approved by a quorum. Parablocks for which not enough validators will assert correctness are discarded. If the block later proves invalid, the initial backers are slashable; this gives Polkadot a rational threat diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index 701f6c87caff0341c36e4b2799d2444c015c411c..61278621cf565c226c2133eb90c0d7e0430c5624 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -1,5 +1,9 @@ # Prospective Parachains +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + ## Overview **Purpose:** Tracks and handles prospective parachain fragments and informs diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 1fed671170c7c42f6de97dead0e53200ef5d675f..432d9ab69bab99297b51fb5af285e7636e8b90ae 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -1,5 +1,9 @@ # Collator Protocol +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The Collator Protocol implements the network protocol by which collators and validators communicate. It is used by collators to distribute collations to validators and used by validators to accept collations by collators. 
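The `vstaging.rs` hunk earlier in this patch makes the inner claim-queue map of `ClaimQueueSnapshot` public and adds two convenience iterators, `iter_claims_for_core` and `iter_all_claims`. The following is a minimal, self-contained sketch of that access pattern only: it uses plain integer stand-ins for `CoreIndex`/`ParaId` (the exact item types are not fully visible in the hunk), and it is illustrative rather than the actual subsystem code.

```rust
use std::collections::{BTreeMap, VecDeque};

/// Stand-in for the snapshot wrapper: core index -> queued para ids.
#[derive(Default)]
struct ClaimQueueSnapshot(pub BTreeMap<u32, VecDeque<u32>>);

impl ClaimQueueSnapshot {
    /// All claims queued on one core, in queue order.
    fn iter_claims_for_core<'a>(
        &'a self,
        core_index: &u32,
    ) -> impl Iterator<Item = &'a u32> + 'a {
        // Missing cores simply yield an empty iterator.
        self.0.get(core_index).map(|q| q.iter()).into_iter().flatten()
    }

    /// Every `(core, queue)` pair in the snapshot.
    fn iter_all_claims(&self) -> impl Iterator<Item = (&u32, &VecDeque<u32>)> + '_ {
        self.0.iter()
    }
}

fn main() {
    let mut queues = BTreeMap::new();
    queues.insert(0u32, VecDeque::from(vec![100u32, 100]));
    queues.insert(1u32, VecDeque::from(vec![200u32]));
    let snapshot = ClaimQueueSnapshot(queues);

    // How many cores have para 100 somewhere in their queue?
    let cores_with_100 = snapshot
        .iter_all_claims()
        .filter(|(_, queue)| queue.contains(&100))
        .count();
    assert_eq!(cores_with_100, 1);

    // Walk a single core's queue without touching the rest of the map.
    let on_core_0: Vec<&u32> = snapshot.iter_claims_for_core(&0).collect();
    assert_eq!(on_core_0, vec![&100, &100]);
}
```

Exposing the map as `pub` while also providing these iterators lets callers either take the whole snapshot or inspect a single core's claims without cloning the map.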
diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md index b017259da8c0863e47deb87e916076d20ed95996..64727d39fabe0dee80e047a1444894733c827014 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -1,5 +1,9 @@ # Provisioner +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. This subsystem is responsible for providing the necessary data to all diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index 0700a781d426324c2c35e72628caa65b577ef979..5031433cf5a1d7a5517458d5f0a5b31a024c4d49 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -1,5 +1,9 @@ # Inclusion Pallet +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The inclusion module is responsible for inclusion and availability of scheduled parachains. It also manages the UMP dispatch queue of each parachain. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 7972c706b9ee1ebce2132a49ccf62a194d6c1e58..f21e1a59c1a4c0a9e05ae41361f5d7d9fa216378 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -1,5 +1,9 @@ # `ParaInherent` +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + This module is responsible for providing all data given to the runtime by the block author to the various parachains modules. The entry-point is mandatory, in that it must be invoked exactly once within every block, and it is also "inherent", in that it is provided with no origin by the block author. The data within it carries its own diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index cceb4dc5a93b3f9ccf643572e1ba32b38aa5a853..d01528d4dee07d2d1d5ae59f5dac6a61e60fb197 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -10,31 +10,31 @@ description = "Polkadot specific RPC functionality." 
workspace = true [dependencies] -jsonrpsee = { version = "0.22", features = ["server"] } -polkadot-primitives = { path = "../primitives" } -sc-client-api = { path = "../../substrate/client/api" } -sp-blockchain = { path = "../../substrate/primitives/blockchain" } -sp-keystore = { path = "../../substrate/primitives/keystore" } -sp-runtime = { path = "../../substrate/primitives/runtime" } -sp-api = { path = "../../substrate/primitives/api" } -sp-application-crypto = { path = "../../substrate/primitives/application-crypto" } -sp-consensus = { path = "../../substrate/primitives/consensus/common" } -sp-consensus-babe = { path = "../../substrate/primitives/consensus/babe" } -sp-consensus-beefy = { path = "../../substrate/primitives/consensus/beefy" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } -sc-rpc = { path = "../../substrate/client/rpc" } -sc-rpc-spec-v2 = { path = "../../substrate/client/rpc-spec-v2" } -sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } -sc-consensus-babe-rpc = { path = "../../substrate/client/consensus/babe/rpc" } -sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } -sc-consensus-beefy-rpc = { path = "../../substrate/client/consensus/beefy/rpc" } -sc-consensus-epochs = { path = "../../substrate/client/consensus/epochs" } -sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } -sc-consensus-grandpa-rpc = { path = "../../substrate/client/consensus/grandpa/rpc" } -sc-sync-state-rpc = { path = "../../substrate/client/sync-state-rpc" } -sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } -substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } -mmr-rpc = { path = "../../substrate/client/merkle-mountain-range/rpc" } -pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } -sp-block-builder = { path = "../../substrate/primitives/block-builder" } -substrate-state-trie-migration-rpc = { path = "../../substrate/utils/frame/rpc/state-trie-migration-rpc" } +jsonrpsee = { features = ["server"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-babe-rpc = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-beefy-rpc = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, 
default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index da89bd2251acff0d0c76b49908109a1ff662846c..718f8b8b070996fb5266ef857e7ba6295edfa676 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -10,66 +10,66 @@ license.workspace = true workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +impl-trait-for-tuples = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +rustc-hex = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc"], workspace = true } serde_derive = { workspace = true } -static_assertions = "1.1.0" +static_assertions = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features = ["serde"] } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-npos-elections = { features = ["serde"], workspace = true } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } -pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-staking = { path = 
"../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-fn = { path = "../../../substrate/frame/staking/reward-fn", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false, optional = true } -pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-broker = { workspace = true } +pallet-fast-unstake = { workspace = true } +pallet-identity = { workspace = true } +pallet-session = { workspace = true } +frame-support = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-fn = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-vesting = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-treasury = { workspace = true } +pallet-asset-rate = { optional = true, workspace = true } +pallet-election-provider-multi-phase = { workspace = true } +frame-election-provider-support = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-babe = { optional = true, workspace = true } -polkadot-primitives = { path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.7.0", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } +polkadot-primitives = { workspace = true } +libsecp256k1 = { workspace = true } +polkadot-runtime-parachains = { workspace = true } -slot-range-helper = { path = "slot_range_helper", default-features = false } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false, optional = true } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +slot-range-helper = { workspace = true } +xcm = { workspace = true } +xcm-executor = { optional = true, workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -frame-support-test = { path = "../../../substrate/frame/support/test" } -pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-treasury = { path = "../../../substrate/frame/treasury" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } +hex-literal = { workspace = true, default-features = true } +frame-support-test = { workspace = true } +pallet-babe = { workspace = true, default-features = 
true } +pallet-treasury = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -libsecp256k1 = "0.7.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } +libsecp256k1 = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 47e8fea240025c02c2e6efa81ebbe54f7e3da115..6beca68f7678f3b575f68551b7283880aaf78915 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -10,11 +10,11 @@ description = "Helper crate for generating slot ranges for the Polkadot runtime. workspace = true [dependencies] -paste = "1.0" -enumn = "0.1.12" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +paste = { workspace = true, default-features = true } +enumn = { workspace = true } +codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 368708f256403d88d2f41cf105dd4dcc20892f28..d0a531b8b6ca5ad995803edc8cbc45daa88176c2 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -698,24 +698,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl parachains_configuration::Config for Test { diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 199b18fba51dc4f1c0b2e49b1f6429d0526e3306..19d82ae85d0035aea89b5d07f0f042be237da42e 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -674,7 +674,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, parameter_types, - traits::{ConstU32, EitherOfDiverse, OnFinalize, OnInitialize}, + traits::{EitherOfDiverse, OnFinalize, OnInitialize}, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_balances; @@ -725,25 +725,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - pub const MaxReserves: u32 = 50; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] @@ -1426,7 +1410,8 @@ mod tests { #[test] fn initialize_winners_in_ending_period_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); run_to_block(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); let para_1 = ParaId::from(1_u32); @@ -1539,7 +1524,8 @@ mod tests { #[test] fn less_winning_samples_work() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); EndingPeriod::set(30); SampleLength::set(10); diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 54208e7fd1351202c0ee64bd9d0fc8adcab09b84..c12af215a04d7119c1052a09a6eb6bee7ac0acb3 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -708,7 +708,7 @@ mod tests { assert_err, assert_noop, assert_ok, derive_impl, dispatch::{GetDispatchInfo, Pays}, ord_parameter_types, parameter_types, - traits::{ConstU32, ExistenceRequirement, WithdrawReasons}, + traits::{ExistenceRequirement, WithdrawReasons}, }; use pallet_balances; use sp_runtime::{ @@ -738,24 +738,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 1dbba363de5661481093f40288e68928c9747eb5..61d406aa681268d8ef0c66567faf7b74555a287d 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -860,7 +860,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + traits::{OnFinalize, OnInitialize}, }; use polkadot_primitives::Id as ParaId; use sp_core::H256; @@ -918,24 +918,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -980,7 +965,7 @@ mod tests { let fund = Funds::::get(para).unwrap(); let account_id = Crowdloan::fund_account_id(fund.fund_index); if winner { - let ed = ::ExistentialDeposit::get(); + let ed: u64 = ::ExistentialDeposit::get(); let free_balance = Balances::free_balance(&account_id); Balances::reserve(&account_id, free_balance - ed) .expect("should be able to reserve free balance minus ED"); @@ -1815,7 +1800,8 @@ mod tests { #[test] fn withdraw_from_finished_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); let para = new_para(); let index = NextFundIndex::::get(); let account_id = Crowdloan::fund_account_id(index); diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index ac2288c906a53fe15a18cd795097e1e7e85ec3c3..709cc69cdbeab470b9de99b16ec4585e09df8fb9 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -249,7 +249,7 @@ mod tests { parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, - ConstU32, FindAuthor, + FindAuthor, }, weights::Weight, PalletId, @@ -315,20 +315,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { @@ -339,13 +328,8 @@ mod tests { impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = (); - type ProposalBondMinimum = (); - type ProposalBondMaximum = (); type SpendPeriod = (); type Burn = (); type BurnDestination = (); diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index e77035b3f6b414fcf6af1fa1f93fa14428fe8ac1..052fb0389db40c086d2c13b3d30501e093ef5f90 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -173,23 +173,12 @@ impl pallet_timestamp::Config for Test { parameter_types! 
{ pub static ExistentialDeposit: Balance = 1; - pub const MaxReserves: u32 = 50; } - +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl configuration::Config for Test { diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 9bbb152f855f2a3e8f37925cd4006ccb4554ab92..6b9191f7c6f2db0f9e98b0a67ead596d4c3a9822 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -721,7 +721,7 @@ mod tests { assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + traits::{OnFinalize, OnInitialize}, }; use frame_system::limits; use pallet_balances::Error as BalancesError; @@ -799,20 +799,11 @@ mod tests { pub const ExistentialDeposit: Balance = 1; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Balance = Balance; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl shared::Config for Test { diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 5ae6b422618e09946750de2def845acc1bc0689f..eb480e4efe1f82f657daaa03cd33d883a01d9f7d 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -534,24 +534,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 900e04eaff1882d5c1faeb3494dc7c87f1c112bf..747b7b5ca634ed0563061aea2d169624a2ed87ac 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -551,24 +551,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/traits.rs b/polkadot/runtime/common/src/traits.rs index 2ed1fb8af9beacda31b0134ebc72c95b24c2d8cd..eadeac74fcdb63cab542ca080cd372b5fbd4f4fd 100644 --- a/polkadot/runtime/common/src/traits.rs +++ b/polkadot/runtime/common/src/traits.rs @@ -56,7 +56,7 @@ pub trait Registrar { /// Remove any lock on the para registration. fn remove_lock(id: ParaId); - /// Register a Para ID under control of `who`. Registration may be be + /// Register a Para ID under control of `who`. Registration may be /// delayed by session rotation. fn register( who: Self::AccountId, diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 342c5a885033884f025c34cadca03f37e1a7297e..54c685effc1165f7528029f01717ee1ad6f3a298 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -10,13 +10,13 @@ description = "Runtime metric interface for the Polkadot node" workspace = true [dependencies] -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true } +codec = { workspace = true } +polkadot-primitives = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } -bs58 = { version = "0.5.0", default-features = false, features = ["alloc"] } +bs58 = { features = ["alloc"], workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 7a75b75428dc83945310cb95e7627ee0840698d4..49a4baf0a5eaf819f510633c8b5a2329b10e8f61 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -10,78 +10,66 @@ license.workspace = true workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ - "derive", - "max-encoded-len", -] } +impl-trait-for-tuples = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = [ - "derive", -] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -derive_more = "0.99.17" -bitflags = "1.3.2" 
+derive_more = { workspace = true, default-features = true } +bitflags = { workspace = true } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [ - "serde", -] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = [ - "serde", -] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [ - "serde", -] } -sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true, default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-application-crypto = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } +sp-arithmetic = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false, optional = true } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-babe = { workspace = true } +pallet-broker = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-mmr = { workspace = true, optional = true } +pallet-session = { 
workspace = true } +pallet-staking = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-vesting = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +polkadot-primitives = { workspace = true } -rand = { version = "0.8.5", default-features = false } -rand_chacha = { version = "0.3.1", default-features = false } -static_assertions = { version = "1.1.0", optional = true } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-metrics = { path = "../metrics", default-features = false } -polkadot-core-primitives = { path = "../../core-primitives", default-features = false } +rand = { workspace = true } +rand_chacha = { workspace = true } +static_assertions = { optional = true, workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-metrics = { workspace = true } +polkadot-core-primitives = { workspace = true } [dev-dependencies] -futures = "0.3.30" -hex-literal = "0.4.1" -sp-keyring = { path = "../../../substrate/primitives/keyring" } -frame-support-test = { path = "../../../substrate/frame/support/test" } -sc-keystore = { path = "../../../substrate/client/keystore" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -thousands = "0.2.0" -assert_matches = "1" -rstest = "0.18.2" +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +frame-support-test = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +thousands = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } serde_json = { workspace = true, default-features = true } [features] @@ -110,7 +98,6 @@ std = [ "polkadot-runtime-metrics/std", "rand/std", "rand_chacha/std", - "rustc-hex/std", "scale-info/std", "serde/std", "sp-api/std", @@ -158,6 +145,7 @@ try-runtime = [ "pallet-balances/try-runtime", "pallet-broker/try-runtime", "pallet-message-queue/try-runtime", + "pallet-mmr/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 81a0988ea67cd3eb3b76784d360d46a1ddf1633e..bab09eda52c2d630171bb790831152d945b718db 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -74,6 +74,9 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); + // Update the spot traffic and revenue on every block. 
+ OnDemandAssigner::on_initialize(b + 1); + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 043a36d99c4974c5dd75f3f4172848a4707c871a..03f05842bca498a26518bbeb0e8f957f2fa5783c 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -31,38 +31,40 @@ //! occupying multiple cores in on-demand, we will likely add a separate order type, where the //! intent can be made explicit. +use sp_runtime::traits::Zero; mod benchmarking; pub mod migration; mod mock_helpers; +mod types; extern crate alloc; #[cfg(test)] mod tests; -use core::mem::take; - use crate::{configuration, paras, scheduler::common::Assignment}; - +use alloc::collections::BinaryHeap; +use core::mem::take; use frame_support::{ pallet_prelude::*, traits::{ + defensive_prelude::*, Currency, ExistenceRequirement::{self, AllowDeath, KeepAlive}, WithdrawReasons, }, + PalletId, }; -use frame_system::pallet_prelude::*; -use polkadot_primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use frame_system::{pallet_prelude::*, Pallet as System}; +use polkadot_primitives::{CoreIndex, Id as ParaId}; use sp_runtime::{ - traits::{One, SaturatedConversion}, + traits::{AccountIdConversion, One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; - -use alloc::collections::BinaryHeap; -use sp_std::{ - cmp::{Ord, Ordering, PartialOrd}, - prelude::*, +use sp_std::prelude::*; +use types::{ + BalanceOf, CoreAffinityCount, EnqueuedOrder, QueuePushDirection, QueueStatusType, + SpotTrafficCalculationErr, }; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -87,217 +89,6 @@ impl WeightInfo for TestWeightInfo { } } -/// Meta data for full queue. -/// -/// This includes elements with affinity and free entries. -/// -/// The actual queue is implemented via multiple priority queues. One for each core, for entries -/// which currently have a core affinity and one free queue, with entries without any affinity yet. -/// -/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). -/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the -/// total complexity of all operations in the block should maintain above complexities. In -/// particular O(N) stays O(N), it should never be O(N*cores). -/// -/// More concrete rundown on complexity: -/// -/// - insert: O(1) for placing an order, O(log(N)) for push backs. -/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core -/// is already less work. -/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again -/// this divides per core. -/// -/// Reads still exist, also improved slightly, but worst case we fetch all entries. -#[derive(Encode, Decode, TypeInfo)] -struct QueueStatusType { - /// Last calculated traffic value. - traffic: FixedU128, - /// The next index to use. - next_index: QueueIndex, - /// Smallest index still in use. - /// - /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index - /// == 0`. - smallest_index: QueueIndex, - /// Indices that have been freed already. 
- /// - /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary - /// heap is roughly bounded in the number of on demand cores: - /// - /// For a single core, elements will always be processed in order. With each core added, a - /// level of out of order execution is added. - freed_indices: BinaryHeap, -} - -impl Default for QueueStatusType { - fn default() -> QueueStatusType { - QueueStatusType { - traffic: FixedU128::default(), - next_index: QueueIndex(0), - smallest_index: QueueIndex(0), - freed_indices: BinaryHeap::new(), - } - } -} - -impl QueueStatusType { - /// How many orders are queued in total? - /// - /// This includes entries which have core affinity. - fn size(&self) -> u32 { - self.next_index - .0 - .overflowing_sub(self.smallest_index.0) - .0 - .saturating_sub(self.freed_indices.len() as u32) - } - - /// Get current next index - /// - /// to use for an element newly pushed to the back of the queue. - fn push_back(&mut self) -> QueueIndex { - let QueueIndex(next_index) = self.next_index; - self.next_index = QueueIndex(next_index.overflowing_add(1).0); - QueueIndex(next_index) - } - - /// Push something to the front of the queue - fn push_front(&mut self) -> QueueIndex { - self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); - self.smallest_index - } - - /// The given index is no longer part of the queue. - /// - /// This updates `smallest_index` if need be. - fn consume_index(&mut self, removed_index: QueueIndex) { - if removed_index != self.smallest_index { - self.freed_indices.push(removed_index.reverse()); - return; - } - let mut index = self.smallest_index.0.overflowing_add(1).0; - // Even more to advance? - while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { - index = index.overflowing_add(1).0; - self.freed_indices.pop(); - } - self.smallest_index = QueueIndex(index); - } -} - -/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a -/// specific `ParaId`. -#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] -#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] -struct CoreAffinityCount { - core_index: CoreIndex, - count: u32, -} - -/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. -#[cfg_attr(test, derive(RuntimeDebug))] -enum QueuePushDirection { - Back, - Front, -} - -/// Shorthand for the Balance type the runtime is using. -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -/// Errors that can happen during spot traffic calculation. -#[derive(PartialEq, RuntimeDebug)] -enum SpotTrafficCalculationErr { - /// The order queue capacity is at 0. - QueueCapacityIsZero, - /// The queue size is larger than the queue capacity. - QueueSizeLargerThanCapacity, - /// Arithmetic error during division, either division by 0 or over/underflow. - Division, -} - -/// Type used for priority indices. -// NOTE: The `Ord` implementation for this type is unsound in the general case. -// Do not use it for anything but it's intended purpose. -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] -struct QueueIndex(u32); - -/// QueueIndex with reverse ordering. -/// -/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. 
-#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] -struct ReverseQueueIndex(u32); - -impl QueueIndex { - fn reverse(self) -> ReverseQueueIndex { - ReverseQueueIndex(self.0) - } -} - -impl Ord for QueueIndex { - fn cmp(&self, other: &Self) -> Ordering { - let diff = self.0.overflowing_sub(other.0).0; - if diff == 0 { - Ordering::Equal - } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { - Ordering::Greater - } else { - Ordering::Less - } - } -} - -impl PartialOrd for QueueIndex { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for ReverseQueueIndex { - fn cmp(&self, other: &Self) -> Ordering { - QueueIndex(other.0).cmp(&QueueIndex(self.0)) - } -} -impl PartialOrd for ReverseQueueIndex { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(&other)) - } -} - -/// Internal representation of an order after it has been enqueued already. -/// -/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards -/// to its elements) -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] -struct EnqueuedOrder { - para_id: ParaId, - idx: QueueIndex, -} - -impl EnqueuedOrder { - fn new(idx: QueueIndex, para_id: ParaId) -> Self { - Self { idx, para_id } - } -} - -impl PartialOrd for EnqueuedOrder { - fn partial_cmp(&self, other: &Self) -> Option { - match other.idx.partial_cmp(&self.idx) { - Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), - o => o, - } - } -} - -impl Ord for EnqueuedOrder { - fn cmp(&self, other: &Self) -> Ordering { - match other.idx.cmp(&self.idx) { - Ordering::Equal => other.para_id.cmp(&self.para_id), - o => o, - } - } -} - #[frame_support::pallet] pub mod pallet { @@ -324,6 +115,15 @@ pub mod pallet { /// The default value for the spot traffic multiplier. #[pallet::constant] type TrafficDefaultValue: Get; + + /// The maximum number of blocks some historical revenue + /// information stored for. + #[pallet::constant] + type MaxHistoricalRevenue: Get; + + /// Identifier for the internal revenue balance. + #[pallet::constant] + type PalletId: Get; } /// Creates an empty queue status for an empty queue with initial traffic value. @@ -365,6 +165,11 @@ pub mod pallet { EntriesOnEmpty, >; + /// Keeps track of accumulated revenue from on demand order sales. + #[pallet::storage] + pub type Revenue = + StorageValue<_, BoundedVec, T::MaxHistoricalRevenue>, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -386,6 +191,19 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { + // Update revenue information storage. + Revenue::::mutate(|revenue| { + if let Some(overdue) = + revenue.force_insert_keep_left(0, 0u32.into()).defensive_unwrap_or(None) + { + // We have some overdue revenue not claimed by the Coretime Chain, let's + // accumulate it at the oldest stored block + if let Some(last) = revenue.last_mut() { + *last = last.saturating_add(overdue); + } + } + }); + let config = configuration::ActiveConfig::::get(); // We need to update the spot traffic on block initialize in order to account for idle // blocks. @@ -393,8 +211,9 @@ pub mod pallet { Self::update_spot_traffic(&config, queue_status); }); - // 2 reads in config and queuestatus, at maximum 1 write to queuestatus. 
- T::DbWeight::get().reads_writes(2, 1) + // Reads: `Revenue`, `ActiveConfig`, `QueueStatus` + // Writes: `Revenue`, `QueueStatus` + T::DbWeight::get().reads_writes(3, 2) } } @@ -527,7 +346,8 @@ where } /// Helper function for `place_order_*` calls. Used to differentiate between placing orders - /// with a keep alive check or to allow the account to be reaped. + /// with a keep alive check or to allow the account to be reaped. The amount charged is + /// stored to the pallet account to be later paid out as revenue. /// /// Parameters: /// - `sender`: The sender of the call, funds will be withdrawn from this account. @@ -562,18 +382,40 @@ where // Is the current price higher than `max_amount` ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); - // Charge the sending account the spot price - let _ = T::Currency::withdraw( + ensure!( + queue_status.size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); + + // Charge the sending account the spot price. The amount will be teleported to the + // broker chain once it requests revenue information. + let amt = T::Currency::withdraw( &sender, spot_price, WithdrawReasons::FEE, existence_requirement, )?; - ensure!( - queue_status.size() < config.scheduler_params.on_demand_queue_max_size, - Error::::QueueFull - ); + // Consume the negative imbalance and deposit it into the pallet account. Make sure the + // account preserves even without the existential deposit. + let pot = Self::account_id(); + if !System::::account_exists(&pot) { + System::::inc_providers(&pot); + } + T::Currency::resolve_creating(&pot, amt); + + // Add the amount to the current block's (index 0) revenue information. + Revenue::::mutate(|bounded_revenue| { + if let Some(current_block) = bounded_revenue.get_mut(0) { + *current_block = current_block.saturating_add(spot_price); + } else { + // Revenue has already been claimed in the same block, including the block + // itself. It shouldn't normally happen as revenue claims in the future are + // not allowed. + bounded_revenue.try_push(spot_price).defensive_ok(); + } + }); + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, @@ -790,6 +632,29 @@ where }) } + /// Collect the revenue from the `when` blockheight + pub fn claim_revenue_until(when: BlockNumberFor) -> BalanceOf { + let now = >::block_number(); + let mut amount: BalanceOf = BalanceOf::::zero(); + Revenue::::mutate(|revenue| { + while !revenue.is_empty() { + let index = (revenue.len() - 1) as u32; + if when > now.saturating_sub(index.into()) { + amount = amount.saturating_add(revenue.pop().defensive_unwrap_or(0u32.into())); + } else { + break + } + } + }); + + amount + } + + /// Account of the pallet pot, where the funds from instantaneous coretime sale are accumulated. + pub fn account_id() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + /// Getter for the affinity tracker. 
#[cfg(test)] fn get_affinity_map(para_id: ParaId) -> Option { @@ -831,4 +696,9 @@ where fn get_traffic_default_value() -> FixedU128 { ::TrafficDefaultValue::get() } + + #[cfg(test)] + fn get_revenue() -> Vec> { + Revenue::::get().to_vec() + } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 5747413e71478eeb5b65984b58c3fe35beecadab..3d01ba655d3f404ac0e5b09d7ef590b175779534 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -17,7 +17,12 @@ use super::*; use crate::{ - assigner_on_demand::{mock_helpers::GenesisConfigBuilder, Error}, + assigner_on_demand::{ + self, + mock_helpers::GenesisConfigBuilder, + types::{QueueIndex, ReverseQueueIndex}, + Error, + }, initializer::SessionChangeNotification, mock::{ new_test_ext, Balances, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, @@ -27,8 +32,13 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use polkadot_primitives::{BlockNumber, SessionIndex, ValidationCode}; -use sp_std::collections::btree_map::BTreeMap; +use polkadot_primitives::{ + BlockNumber, SessionIndex, ValidationCode, ON_DEMAND_MAX_QUEUE_MAX_SIZE, +}; +use sp_std::{ + cmp::{Ord, Ordering}, + collections::btree_map::BTreeMap, +}; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { let validation_code: ValidationCode = vec![1, 2, 3].into(); @@ -73,7 +83,7 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); - // We need to update the spot traffic on every block. + // Update the spot traffic and revenue on every block. OnDemandAssigner::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. @@ -81,16 +91,26 @@ fn run_to_block( } } -fn place_order(para_id: ParaId) { +fn place_order_run_to_blocknumber(para_id: ParaId, blocknumber: Option) { let alice = 100u64; let amt = 10_000_000u128; Balances::make_free_balance_be(&alice, amt); - run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + if let Some(bn) = blocknumber { + run_to_block(bn, |n| if n == bn { Some(Default::default()) } else { None }); + } OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id).unwrap() } +fn place_order_run_to_101(para_id: ParaId) { + place_order_run_to_blocknumber(para_id, Some(101)); +} + +fn place_order(para_id: ParaId) { + place_order_run_to_blocknumber(para_id, None); +} + #[test] fn spot_traffic_capacity_zero_returns_none() { match OnDemandAssigner::calculate_spot_traffic( @@ -377,8 +397,8 @@ fn push_back_assignment_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); // Add enough assignments to the order queue. - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Pop order a assert_eq!( @@ -424,9 +444,9 @@ fn affinity_prohibits_parallel_scheduling() { assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); // Add 2 assignments for para_a for every para_b. - place_order(para_a); - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Approximate having 1 core. 
for _ in 0..3 { @@ -448,9 +468,9 @@ fn affinity_prohibits_parallel_scheduling() { OnDemandAssigner::report_processed(para_b, 0.into()); // Add 2 assignments for para_a for every para_b. - place_order(para_a); - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment for _ in 0..3 { @@ -490,7 +510,7 @@ fn affinity_changes_work() { // Add enough assignments to the order queue. for _ in 0..10 { - place_order(para_a); + place_order_run_to_101(para_a); } // There should be no affinity before the scheduler pops. @@ -554,7 +574,7 @@ fn new_affinity_for_a_core_must_come_from_free_entries() { // Place orders for all chains. parachains.iter().for_each(|chain| { - place_order(*chain); + place_order_run_to_101(*chain); }); // There are 4 entries in free_entries. @@ -679,8 +699,8 @@ fn queue_status_size_fn_works() { // Place orders for all chains. parachains.iter().for_each(|chain| { // 2 per chain for a total of 6 - place_order(*chain); - place_order(*chain); + place_order_run_to_101(*chain); + place_order_run_to_101(*chain); }); // 6 orders in free entries @@ -707,3 +727,112 @@ fn queue_status_size_fn_works() { assert_eq!(OnDemandAssigner::get_queue_status().size(), 4) }); } + +#[test] +fn revenue_information_fetching_works() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + schedule_blank_para(para_a, ParaKind::Parathread); + // Mock assigner sets max revenue history to 10. + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + let revenue = OnDemandAssigner::claim_revenue_until(10); + + // No revenue should be recorded. + assert_eq!(revenue, 0); + + // Place one order + place_order_run_to_blocknumber(para_a, Some(11)); + let revenue = OnDemandAssigner::get_revenue(); + let claim = OnDemandAssigner::claim_revenue_until(11); + + // Revenue until the current block is still zero as "until" is non-inclusive + assert_eq!(claim, 0); + + run_to_block(12, |n| if n == 12 { Some(Default::default()) } else { None }); + let claim = OnDemandAssigner::claim_revenue_until(12); + + // Revenue for a single order should be recorded and shouldn't have been pruned by the + // previous call + assert_eq!(claim, revenue[0]); + + // Place many orders + place_order(para_a); + place_order(para_a); + + run_to_block(13, |n| if n == 13 { Some(Default::default()) } else { None }); + + place_order(para_a); + + run_to_block(15, |n| if n == 14 { Some(Default::default()) } else { None }); + + let revenue = OnDemandAssigner::claim_revenue_until(15); + + // All 3 orders should be accounted for. 
+ assert_eq!(revenue, 30_000); + + // Place one order + place_order_run_to_blocknumber(para_a, Some(16)); + + let revenue = OnDemandAssigner::claim_revenue_until(15); + + // Order is not in range of the revenue_until call + assert_eq!(revenue, 0); + + run_to_block(21, |n| if n == 20 { Some(Default::default()) } else { None }); + let revenue = OnDemandAssigner::claim_revenue_until(21); + assert_eq!(revenue, 10_000); + + // Make sure overdue revenue is accumulated + for i in 21..=35 { + run_to_block(i, |n| if n % 10 == 0 { Some(Default::default()) } else { None }); + place_order(para_a); + } + run_to_block(36, |_| None); + let revenue = OnDemandAssigner::claim_revenue_until(36); + assert_eq!(revenue, 150_000); + }); +} + +#[test] +fn pot_account_is_immortal() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let pot = OnDemandAssigner::account_id(); + assert!(!System::account_exists(&pot)); + schedule_blank_para(para_a, ParaKind::Parathread); + // Mock assigner sets max revenue history to 10. + + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + place_order_run_to_blocknumber(para_a, Some(12)); + let purchase_revenue = Balances::free_balance(&pot); + assert!(purchase_revenue > 0); + + run_to_block(15, |_| None); + let _imb = ::Currency::withdraw( + &pot, + purchase_revenue, + WithdrawReasons::FEE, + ExistenceRequirement::AllowDeath, + ); + assert_eq!(Balances::free_balance(&pot), 0); + assert!(System::account_exists(&pot)); + assert_eq!(System::providers(&pot), 1); + + // One more cycle to make sure providers are not increased on every transition from zero + run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); + place_order_run_to_blocknumber(para_a, Some(22)); + let purchase_revenue = Balances::free_balance(&pot); + assert!(purchase_revenue > 0); + + run_to_block(25, |_| None); + let _imb = ::Currency::withdraw( + &pot, + purchase_revenue, + WithdrawReasons::FEE, + ExistenceRequirement::AllowDeath, + ); + assert_eq!(Balances::free_balance(&pot), 0); + assert!(System::account_exists(&pot)); + assert_eq!(System::providers(&pot), 1); + }); +} diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/types.rs b/polkadot/runtime/parachains/src/assigner_on_demand/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..51d586a77a1784c905469dc48ed58fd382a1186b --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_on_demand/types.rs @@ -0,0 +1,241 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! On demand module types. 
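// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): a minimal, self-contained sketch of the
// revenue bookkeeping introduced above. Slot 0 always holds the current block's
// takings: `on_initialize` shifts a fresh zero in at the front and folds
// whatever drops off the bounded window back into the oldest retained entry,
// `charge_for_order` adds the spot price to slot 0, and `claim_revenue_until`
// drains entries strictly older than `when` from the back. The names
// (`RevenueWindow`, `MAX_HISTORY`) and the plain `Vec` are illustrative only.

const MAX_HISTORY: usize = 10;

struct RevenueWindow {
    /// `revenue[0]` is the current block, `revenue[i]` is `i` blocks ago.
    revenue: Vec<u128>,
    /// Current block number.
    now: u64,
}

impl RevenueWindow {
    fn on_initialize(&mut self) {
        self.now += 1;
        self.revenue.insert(0, 0);
        if self.revenue.len() > MAX_HISTORY {
            // Overdue revenue that was never claimed is accumulated into the
            // oldest slot still kept, mirroring `force_insert_keep_left`.
            let overdue = self.revenue.pop().unwrap_or(0);
            if let Some(oldest) = self.revenue.last_mut() {
                *oldest += overdue;
            }
        }
    }

    fn charge_for_order(&mut self, spot_price: u128) {
        match self.revenue.first_mut() {
            Some(current_block) => *current_block += spot_price,
            None => self.revenue.push(spot_price),
        }
    }

    fn claim_revenue_until(&mut self, when: u64) -> u128 {
        let mut amount = 0;
        while !self.revenue.is_empty() {
            let index = (self.revenue.len() - 1) as u64;
            // The back entry belongs to block `now - index`; claim it only if
            // that block is strictly before `when` ("until" is non-inclusive).
            if when > self.now.saturating_sub(index) {
                amount += self.revenue.pop().unwrap_or(0);
            } else {
                break;
            }
        }
        amount
    }
}

// Usage, matching `revenue_information_fetching_works` above: an order charged
// at block N is not returned by `claim_revenue_until(N)` at block N, but it is
// returned by `claim_revenue_until(N + 1)` once the chain is at block N + 1.
// ---------------------------------------------------------------------------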
+ +use super::{alloc, pallet::Config}; +use alloc::collections::BinaryHeap; +use frame_support::{ + pallet_prelude::{Decode, Encode, RuntimeDebug, TypeInfo}, + traits::Currency, +}; +use polkadot_primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use sp_runtime::FixedU128; +use sp_std::{ + cmp::{Ord, Ordering, PartialOrd}, + prelude::*, +}; + +/// Shorthand for the Balance type the runtime is using. +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// Meta data for full queue. +/// +/// This includes elements with affinity and free entries. +/// +/// The actual queue is implemented via multiple priority queues. One for each core, for entries +/// which currently have a core affinity and one free queue, with entries without any affinity yet. +/// +/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). +/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the +/// total complexity of all operations in the block should maintain above complexities. In +/// particular O(N) stays O(N), it should never be O(N*cores). +/// +/// More concrete rundown on complexity: +/// +/// - insert: O(1) for placing an order, O(log(N)) for push backs. +/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core +/// is already less work. +/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again +/// this divides per core. +/// +/// Reads still exist, also improved slightly, but worst case we fetch all entries. +#[derive(Encode, Decode, TypeInfo)] +pub struct QueueStatusType { + /// Last calculated traffic value. + pub traffic: FixedU128, + /// The next index to use. + pub next_index: QueueIndex, + /// Smallest index still in use. + /// + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index + /// == 0`. + pub smallest_index: QueueIndex, + /// Indices that have been freed already. + /// + /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary + /// heap is roughly bounded in the number of on demand cores: + /// + /// For a single core, elements will always be processed in order. With each core added, a + /// level of out of order execution is added. + pub freed_indices: BinaryHeap, +} + +impl Default for QueueStatusType { + fn default() -> QueueStatusType { + QueueStatusType { + traffic: FixedU128::default(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } + } +} + +impl QueueStatusType { + /// How many orders are queued in total? + /// + /// This includes entries which have core affinity. + pub fn size(&self) -> u32 { + self.next_index + .0 + .overflowing_sub(self.smallest_index.0) + .0 + .saturating_sub(self.freed_indices.len() as u32) + } + + /// Get current next index + /// + /// to use for an element newly pushed to the back of the queue. + pub fn push_back(&mut self) -> QueueIndex { + let QueueIndex(next_index) = self.next_index; + self.next_index = QueueIndex(next_index.overflowing_add(1).0); + QueueIndex(next_index) + } + + /// Push something to the front of the queue + pub fn push_front(&mut self) -> QueueIndex { + self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); + self.smallest_index + } + + /// The given index is no longer part of the queue. + /// + /// This updates `smallest_index` if need be. 
+ pub fn consume_index(&mut self, removed_index: QueueIndex) { + if removed_index != self.smallest_index { + self.freed_indices.push(removed_index.reverse()); + return; + } + let mut index = self.smallest_index.0.overflowing_add(1).0; + // Even more to advance? + while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { + index = index.overflowing_add(1).0; + self.freed_indices.pop(); + } + self.smallest_index = QueueIndex(index); + } +} + +/// Type used for priority indices. +// NOTE: The `Ord` implementation for this type is unsound in the general case. +// Do not use it for anything but it's intended purpose. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +pub struct QueueIndex(pub u32); + +/// QueueIndex with reverse ordering. +/// +/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +pub struct ReverseQueueIndex(pub u32); + +impl QueueIndex { + fn reverse(self) -> ReverseQueueIndex { + ReverseQueueIndex(self.0) + } +} + +impl Ord for QueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + let diff = self.0.overflowing_sub(other.0).0; + if diff == 0 { + Ordering::Equal + } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { + Ordering::Greater + } else { + Ordering::Less + } + } +} + +impl PartialOrd for QueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ReverseQueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + QueueIndex(other.0).cmp(&QueueIndex(self.0)) + } +} +impl PartialOrd for ReverseQueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + +/// Internal representation of an order after it has been enqueued already. +/// +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards +/// to its elements) +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +pub struct EnqueuedOrder { + pub para_id: ParaId, + pub idx: QueueIndex, +} + +impl EnqueuedOrder { + pub fn new(idx: QueueIndex, para_id: ParaId) -> Self { + Self { idx, para_id } + } +} + +impl PartialOrd for EnqueuedOrder { + fn partial_cmp(&self, other: &Self) -> Option { + match other.idx.partial_cmp(&self.idx) { + Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), + o => o, + } + } +} + +impl Ord for EnqueuedOrder { + fn cmp(&self, other: &Self) -> Ordering { + match other.idx.cmp(&self.idx) { + Ordering::Equal => other.para_id.cmp(&self.para_id), + o => o, + } + } +} + +/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a +/// specific `ParaId`. +#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] +pub struct CoreAffinityCount { + pub core_index: CoreIndex, + pub count: u32, +} + +/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. +#[cfg_attr(test, derive(RuntimeDebug))] +pub enum QueuePushDirection { + Back, + Front, +} + +/// Errors that can happen during spot traffic calculation. +#[derive(PartialEq, RuntimeDebug)] +pub enum SpotTrafficCalculationErr { + /// The order queue capacity is at 0. + QueueCapacityIsZero, + /// The queue size is larger than the queue capacity. + QueueSizeLargerThanCapacity, + /// Arithmetic error during division, either division by 0 or over/underflow. 
+ Division, +} diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index d1ac71f580ee0e70015bf130b6836519005ee280..028250e188ee9885ad9c2c80d66ffa12f41be9e2 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! On demand assigner pallet benchmarking. +//! Coretime pallet benchmarking. #![cfg(feature = "runtime-benchmarks")] @@ -28,6 +28,30 @@ mod benchmarks { use super::*; use assigner_coretime::PartsOf57600; + #[benchmark] + fn request_revenue_at() { + let root_origin = ::RuntimeOrigin::root(); + let mhr = ::MaxHistoricalRevenue::get(); + frame_system::Pallet::::set_block_number((mhr + 2).into()); + let minimum_balance = ::Currency::minimum_balance(); + let rev: BoundedVec< + <::Currency as frame_support::traits::Currency< + T::AccountId, + >>::Balance, + T::MaxHistoricalRevenue, + > = BoundedVec::try_from((1..=mhr).map(|v| minimum_balance * v.into()).collect::>()) + .unwrap(); + assigner_on_demand::Revenue::::put(rev); + + ::Currency::make_free_balance_be( + &>::account_id(), + minimum_balance * (mhr * (mhr + 1)).into(), + ); + + #[extrinsic_call] + _(root_origin as ::RuntimeOrigin, mhr + 1) + } + #[benchmark] fn request_core_count() { // Setup diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index dedffb733d33ef120f6b220afc550f04b77f0979..fc8a3c7d9d24b217d1de883c6cb3ed8b3f7be6e2 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -18,20 +18,36 @@ //! //! 
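// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): the `Ord` impl that `QueueIndex` keeps
// in the new `types` module above compares indices via wrapping subtraction, so
// the ordering stays correct when the u32 counter overflows, provided no two
// live indices are ever further apart than the maximum queue size. A standalone
// sketch of that idea follows; `Index` and the bound value are illustrative,
// the real code uses `ON_DEMAND_MAX_QUEUE_MAX_SIZE`.

use core::cmp::Ordering;

const MAX_QUEUE_SIZE: u32 = 10_000;

#[derive(PartialEq, Eq, Clone, Copy, Debug)]
struct Index(u32);

impl Ord for Index {
    fn cmp(&self, other: &Self) -> Ordering {
        // `self - other` modulo 2^32: zero means equal, a "small" difference
        // means `self` was issued later than `other`, even across the wrap.
        let diff = self.0.wrapping_sub(other.0);
        if diff == 0 {
            Ordering::Equal
        } else if diff <= MAX_QUEUE_SIZE {
            Ordering::Greater
        } else {
            Ordering::Less
        }
    }
}

impl PartialOrd for Index {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

// An index issued just after the counter wraps still sorts after one issued
// just before it: `Index(3) > Index(u32::MAX - 2)`, because 3 - (u32::MAX - 2)
// wraps to 6, well within the bound. This is also why the NOTE in `types.rs`
// warns that the ordering is unsound outside its intended purpose.
// ---------------------------------------------------------------------------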
-use sp_std::{prelude::*, result}; - -use frame_support::{pallet_prelude::*, traits::Currency}; +use frame_support::{ + pallet_prelude::*, + traits::{defensive_prelude::*, Currency}, +}; use frame_system::pallet_prelude::*; pub use pallet::*; use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; -use polkadot_primitives::{CoreIndex, Id as ParaId}; +use polkadot_primitives::{Balance, BlockNumber, CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; -use xcm::prelude::{ - send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm, +use sp_runtime::traits::TryConvert; +use sp_std::{prelude::*, result}; +use xcm::{ + prelude::{send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm}, + v4::{ + Asset, + AssetFilter::Wild, + AssetId, Assets, Error as XcmError, + Fungibility::Fungible, + Instruction::{DepositAsset, ReceiveTeleportedAsset}, + Junctions::Here, + Reanchorable, + WildAsset::AllCounted, + XcmContext, + }, }; +use xcm_executor::traits::TransactAsset; use crate::{ assigner_coretime::{self, PartsOf57600}, + assigner_on_demand, initializer::{OnNewSession, SessionChangeNotification}, origin::{ensure_parachain, Origin}, }; @@ -39,9 +55,11 @@ use crate::{ mod benchmarking; pub mod migration; +const LOG_TARGET: &str = "runtime::parachains::coretime"; + pub trait WeightInfo { fn request_core_count() -> Weight; - //fn request_revenue_info_at() -> Weight; + fn request_revenue_at() -> Weight; //fn credit_account() -> Weight; fn assign_core(s: u32) -> Weight; } @@ -53,19 +71,23 @@ impl WeightInfo for TestWeightInfo { fn request_core_count() -> Weight { Weight::MAX } - // TODO: Add real benchmarking functionality for each of these to - // benchmarking.rs, then uncomment here and in trait definition. - /*fn request_revenue_info_at() -> Weight { + fn request_revenue_at() -> Weight { Weight::MAX } - fn credit_account() -> Weight { - Weight::MAX - }*/ + // TODO: Add real benchmarking functionality for each of these to + // benchmarking.rs, then uncomment here and in trait definition. + //fn credit_account() -> Weight { + // Weight::MAX + //} fn assign_core(_s: u32) -> Weight { Weight::MAX } } +/// Shorthand for the Balance type the runtime is using. +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + /// Broker pallet index on the coretime chain. Used to /// /// construct remote calls. The codec index must correspond to the index of `Broker` in the @@ -85,6 +107,8 @@ enum CoretimeCalls { SetLease(pallet_broker::TaskId, pallet_broker::Timeslice), #[codec(index = 19)] NotifyCoreCount(u16), + #[codec(index = 20)] + NotifyRevenue((BlockNumber, Balance)), #[codec(index = 99)] SwapLeases(ParaId, ParaId), } @@ -92,6 +116,9 @@ enum CoretimeCalls { #[frame_support::pallet] pub mod pallet { use crate::configuration; + use sp_runtime::traits::TryConvert; + use xcm::v4::InteriorLocation; + use xcm_executor::traits::TransactAsset; use super::*; @@ -100,7 +127,9 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + assigner_coretime::Config { + pub trait Config: + frame_system::Config + assigner_coretime::Config + assigner_on_demand::Config + { type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -109,9 +138,17 @@ pub mod pallet { /// The ParaId of the coretime chain. #[pallet::constant] type BrokerId: Get; + /// The coretime chain pot location. 
+ #[pallet::constant] + type BrokerPotLocation: Get; /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; + /// The XCM sender. type SendXcm: SendXcm; + /// The asset transactor. + type AssetTransactor: TransactAsset; + /// AccountId to Location converter + type AccountToLocation: for<'a> TryConvert<&'a Self::AccountId, Location>; /// Maximum weight for any XCM transact call that should be executed on the coretime chain. /// @@ -132,6 +169,11 @@ pub mod pallet { pub enum Error { /// The paraid making the call is not the coretime brokerage system parachain. NotBroker, + /// Requested revenue information `when` parameter was in the future from the current + /// block height. + RequestedFutureRevenue, + /// Failed to transfer assets to the coretime chain + AssetTransferFailed, } #[pallet::hooks] @@ -154,17 +196,17 @@ pub mod pallet { configuration::Pallet::::set_coretime_cores_unchecked(u32::from(count)) } - //// TODO Impl me! - ////#[pallet::weight(::WeightInfo::request_revenue_info_at())] - //#[pallet::call_index(2)] - //pub fn request_revenue_info_at( - // origin: OriginFor, - // _when: BlockNumberFor, - //) -> DispatchResult { - // // Ignore requests not coming from the coretime chain or root. - // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; - // Ok(()) - //} + /// Request to claim the instantaneous coretime sales revenue starting from the block it was + /// last claimed until and up to the block specified. The claimed amount value is sent back + /// to the Coretime chain in a `notify_revenue` message. At the same time, the amount is + /// teleported to the Coretime chain. + #[pallet::weight(::WeightInfo::request_revenue_at())] + #[pallet::call_index(2)] + pub fn request_revenue_at(origin: OriginFor, when: BlockNumber) -> DispatchResult { + // Ignore requests not coming from the Coretime Chain or Root. + Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + Self::notify_revenue(when) + } //// TODO Impl me! ////#[pallet::weight(::WeightInfo::credit_account())] @@ -244,11 +286,43 @@ impl Pallet { Location::new(0, [Junction::Parachain(T::BrokerId::get())]), message, ) { - log::error!("Sending `NotifyCoreCount` to coretime chain failed: {:?}", err); + log::error!(target: LOG_TARGET, "Sending `NotifyCoreCount` to coretime chain failed: {:?}", err); } } } + /// Provide the amount of revenue accumulated from Instantaneous Coretime Sales from Relay-chain + /// block number last_until to until, not including until itself. last_until is defined as being + /// the until argument of the last notify_revenue message sent, or zero for the first call. If + /// revenue is None, this indicates that the information is no longer available. This explicitly + /// disregards the possibility of multiple parachains requesting and being notified of revenue + /// information. + /// + /// The Relay-chain must be configured to ensure that only a single revenue information + /// destination exists. + pub fn notify_revenue(until: BlockNumber) -> DispatchResult { + let now = >::block_number(); + let until_bnf: BlockNumberFor = until.into(); + + // When cannot be in the future. 
+ ensure!(until_bnf <= now, Error::::RequestedFutureRevenue); + + let amount = >::claim_revenue_until(until_bnf); + log::debug!(target: LOG_TARGET, "Revenue info requested: {:?}", amount); + + let raw_revenue: Balance = amount.try_into().map_err(|_| { + log::error!(target: LOG_TARGET, "Converting on demand revenue for `NotifyRevenue` failed"); + Error::::AssetTransferFailed + })?; + + do_notify_revenue::(until, raw_revenue).map_err(|err| { + log::error!(target: LOG_TARGET, "notify_revenue failed: {err:?}"); + Error::::AssetTransferFailed + })?; + + Ok(()) + } + // Handle legacy swaps in coretime. Notifies coretime chain that a lease swap has occurred via // XCM message. This function is meant to be used in an implementation of `OnSwap` trait. pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { @@ -263,7 +337,7 @@ impl Pallet { Location::new(0, [Junction::Parachain(T::BrokerId::get())]), message, ) { - log::error!("Sending `SwapLeases` to coretime chain failed: {:?}", err); + log::error!(target: LOG_TARGET, "Sending `SwapLeases` to coretime chain failed: {:?}", err); } } } @@ -281,3 +355,55 @@ fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruct call: BrokerRuntimePallets::Broker(call).encode().into(), } } + +fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Result<(), XcmError> { + let dest = Junction::Parachain(T::BrokerId::get()).into_location(); + let mut message = Vec::new(); + let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) }; + let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; + + if raw_revenue > 0 { + let on_demand_pot = + T::AccountToLocation::try_convert(&>::account_id()) + .map_err(|err| { + log::error!( + target: LOG_TARGET, + "Failed to convert on-demand pot account to XCM location: {err:?}", + ); + XcmError::InvalidLocation + })?; + + let withdrawn = T::AssetTransactor::withdraw_asset(&asset, &on_demand_pot, None)?; + + T::AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?; + + let assets_reanchored = Into::::into(withdrawn) + .reanchored(&dest, &Here.into()) + .defensive_map_err(|_| XcmError::ReanchorFailed)?; + + message.extend( + [ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + ReceiveTeleportedAsset(assets_reanchored), + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: T::BrokerPotLocation::get().into_location(), + }, + ] + .into_iter(), + ); + } + + message.push(mk_coretime_call::(CoretimeCalls::NotifyRevenue((when, raw_revenue)))); + + send_xcm::(dest.clone(), Xcm(message))?; + + if raw_revenue > 0 { + T::AssetTransactor::check_out(&dest, &asset, &dummy_xcm_context); + } + + Ok(()) +} diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 0a0be8432b2520201d52e6598058940794e8b53b..9c23347ebb58bb93bf72e2d94a1f95defcea27b9 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -36,6 +36,7 @@ use frame_support::{ Currency, ProcessMessage, ProcessMessageError, ValidatorSet, ValidatorSetWithIdentification, }, weights::{Weight, WeightMeter}, + PalletId, }; use frame_support_test::TestRandomness; use frame_system::limits; @@ -57,7 +58,7 @@ use sp_std::{ use std::collections::HashMap; use xcm::{ prelude::XcmVersion, - v4::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, + v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, IntoVersion, VersionedXcm, 
WrapVersion, }; @@ -139,20 +140,11 @@ parameter_types! { pub static ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! { @@ -400,17 +392,23 @@ impl pallet_message_queue::Config for Test { type IdleMaxServiceWeight = (); } +impl assigner_parachains::Config for Test {} + parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Production chains should keep this numbar around twice the + // defined Timeslice for Coretime. + pub const MaxHistoricalRevenue: BlockNumber = 2 * 5; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } -impl assigner_parachains::Config for Test {} - impl assigner_on_demand::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = crate::assigner_on_demand::TestWeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl assigner_coretime::Config for Test {} @@ -420,6 +418,13 @@ parameter_types! { pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + unimplemented!() + } +} + impl coretime::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; @@ -428,6 +433,9 @@ impl coretime::Config for Test { type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; type MaxXcmTransactWeight = MaxXcmTransactWeight; + type BrokerPotLocation = BrokerPot; + type AssetTransactor = (); + type AccountToLocation = (); } pub struct DummyXcmSender; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 62e96e9fbb0511f5b188ff60a76a6d1620188d9b..f4e3db185fead7144e6cb0ae3455af98229d59f7 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -28,10 +28,10 @@ use sp_std::{ pub fn claim_queue() -> BTreeMap> { let now = >::block_number() + One::one(); - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. + // This is needed so that the claim queue always has the right size (equal to + // scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the + // previous candidate is included, the claim queue will have already pop()-ed the next item + // from the queue and the length would be `scheduling_lookahead - 1`. 
>::free_cores_and_fill_claim_queue(Vec::new(), now); let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 33b4d849c490f2bcbb3e931118dad8f5c5c3850b..d7fe5c06863c87413f988a972790c5d3482b513f 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -351,6 +351,9 @@ impl Pallet { } /// Note that the given cores have become occupied. Update the claim queue accordingly. + /// This will not push a new entry onto the claim queue, so the length after this call will be + /// the expected length - 1. The claim_queue runtime API will take care of adding another entry + /// here, to ensure the right lookahead. pub(crate) fn occupied( now_occupied: BTreeMap, ) -> BTreeMap { diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index d342926d3c5a08eb50e6317882a9b4521c3115d2..c4fbd461a631849010e4d3de7ab98a7b743432fb 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,117 +11,117 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } serde_json = { features = ["alloc"], workspace = true } -static_assertions = "1.1.0" -smallvec = "1.8.0" -bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } +static_assertions = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -rococo-runtime-constants = { package = "rococo-runtime-constants", path = "constants", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", 
default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-consensus-grandpa = { workspace = true } +binary-merkle-tree = { workspace = true } +rococo-runtime-constants = { workspace = true } +sp-api = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-arithmetic = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-beefy = { path = "../../../substrate/frame/beefy", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-bounties = { path = "../../../substrate/frame/bounties", default-features = false } -pallet-child-bounties = { path = "../../../substrate/frame/child-bounties", default-features = false } -pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } -pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } -pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } -pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } 
-pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } -pallet-nis = { path = "../../../substrate/frame/nis", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-parameters = { path = "../../../substrate/frame/parameters", default-features = false } -pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } -pallet-ranked-collective = { path = "../../../substrate/frame/ranked-collective", default-features = false } -pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } -pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } -pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-society = { path = "../../../substrate/frame/society", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-tips = { path = "../../../substrate/frame/tips", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-beefy = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-bounties = { workspace = true } +pallet-child-bounties = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-collective = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-democracy = { workspace = true } +pallet-elections-phragmen = { workspace = true } +pallet-asset-rate = { workspace = true } +frame-executive = { workspace = true } +pallet-grandpa = { workspace = true } 
+pallet-identity = { workspace = true } +pallet-indices = { workspace = true } +pallet-membership = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-mmr = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nis = { workspace = true } +pallet-offences = { workspace = true } +pallet-parameters = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-recovery = { workspace = true } +pallet-referenda = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-society = { workspace = true } +pallet-sudo = { workspace = true } +frame-support = { features = ["tuples-96"], workspace = true } +pallet-staking = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-tips = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +pallet-root-testing = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -hex-literal = { version = "0.4.1" } +frame-benchmarking = { optional = true, workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } [dev-dependencies] -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } -sp-trie = { path = "../../../substrate/primitives/trie" } -separator = "0.4.1" +tiny-keccak = 
{ features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } +remote-externalities = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +separator = { workspace = true } serde_json = { workspace = true, default-features = true } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -tokio = { version = "1.24.2", features = ["macros"] } +sp-tracing = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -217,7 +217,7 @@ std = [ "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -270,7 +270,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 2c49488077e6f82b6a306c2b98d7d0f493c4681c..b67c36d71fd8721c583789a7a283868111afc037 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -polkadot-runtime-common = { path = "../../common", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +sp-core = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 1dcafdcbc4d9a6fe48fd460e3ac836d95bb3372b..47b50bf6c106918c5539cb327dd02af617de8c45 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -121,6 +121,17 @@ pub mod system_parachain { /// All system parachains of Rococo. pub type SystemParachains = IsChildSystemParachain; + + /// Coretime constants + pub mod coretime { + /// Coretime timeslice period in blocks + /// WARNING: This constant is used accross chains, so additional care should be taken + /// when changing it. 
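Before the concrete values, it may help to see how this timeslice period is consumed elsewhere in the change: the Rococo runtime sizes its on-demand revenue history as `MaxHistoricalRevenue = 2 * TIMESLICE_PERIOD`. The standalone sketch below only illustrates that relationship and the `fast-runtime` gating; it is editorial, not part of the patch, and uses plain constants rather than the runtime's `parameter_types!`.

```rust
// Illustrative sketch: a cfg-gated timeslice period and a revenue-history
// depth derived from it, mirroring the constants introduced in this change.
type BlockNumber = u32;

#[cfg(feature = "fast-runtime")]
const TIMESLICE_PERIOD: BlockNumber = 20;
#[cfg(not(feature = "fast-runtime"))]
const TIMESLICE_PERIOD: BlockNumber = 80;

/// Keep two timeslices' worth of on-demand revenue information.
const MAX_HISTORICAL_REVENUE: BlockNumber = 2 * TIMESLICE_PERIOD;

fn main() {
    assert_eq!(MAX_HISTORICAL_REVENUE, 2 * TIMESLICE_PERIOD);
    println!("revenue history depth: {MAX_HISTORICAL_REVENUE} blocks");
}
```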
+ #[cfg(feature = "fast-runtime")] + pub const TIMESLICE_PERIOD: u32 = 20; + #[cfg(not(feature = "fast-runtime"))] + pub const TIMESLICE_PERIOD: u32 = 80; + } } /// Rococo Treasury pallet instance. diff --git a/polkadot/runtime/rococo/src/governance/fellowship.rs b/polkadot/runtime/rococo/src/governance/fellowship.rs index a589b768afde2c0757e74a6535509228f1613a82..27a58a0eebd183bc5d21efa655113cf3a0251cc0 100644 --- a/polkadot/runtime/rococo/src/governance/fellowship.rs +++ b/polkadot/runtime/rococo/src/governance/fellowship.rs @@ -356,6 +356,7 @@ impl pallet_ranked_collective::Config for Runtime type MinRankOfClass = sp_runtime::traits::Identity; type MemberSwappedHandler = (); type VoteWeight = pallet_ranked_collective::Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (); } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 04203f4bfb793afddb20b5e3c750733bb61cff6b..010304efe8b9bf4552feac447a6bc456a8d3ccf0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -60,7 +60,7 @@ use polkadot_runtime_parachains::{ scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; -use rococo_runtime_constants::system_parachain::BROKER_ID; +use rococo_runtime_constants::system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID}; use scale_info::TypeInfo; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_beefy::{ @@ -92,12 +92,13 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; -use sp_core::{ConstU128, ConstU8, OpaqueMetadata, H256}; +use sp_core::{ConstU128, ConstU8, Get, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, + Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, + Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, @@ -131,7 +132,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -162,7 +163,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, @@ -476,9 +477,6 @@ parameter_types! { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 2000 * CENTS; - pub const ProposalBondMaximum: Balance = 1 * GRAND; pub const SpendPeriod: BlockNumber = 6 * DAYS; pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); @@ -501,13 +499,8 @@ parameter_types! 
{ impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = EitherOfDiverse, Treasurer>; type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; - type OnSlash = Treasury; - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = ProposalBondMaximum; type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = Society; @@ -566,6 +559,7 @@ impl pallet_bounties::Config for Runtime { type RuntimeEvent = RuntimeEvent; type MaximumReasonLength = MaximumReasonLength; type WeightInfo = weights::pallet_bounties::WeightInfo; + type OnSlash = Treasury; } parameter_types! { @@ -1064,21 +1058,39 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + Junction::AccountId32 { network: None, id: BrokerPalletId::get().into_account_truncating() } + .into() + } +} + impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BrokerId = BrokerId; + type BrokerPotLocation = BrokerPot; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type AssetTransactor = crate::xcm_config::LocalAssetTransactor; + type AccountToLocation = xcm_builder::AliasesIntoAccountId32< + xcm_config::ThisNetwork, + ::AccountId, + >; type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * TIMESLICE_PERIOD; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_assigner_on_demand::Config for Runtime { @@ -1086,6 +1098,8 @@ impl parachains_assigner_on_demand::Config for Runtime { type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl parachains_assigner_coretime::Config for Runtime {} @@ -1269,6 +1283,7 @@ impl pallet_beefy::Config for Runtime { type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = MmrLeaf; + type AncestryHelper = MmrLeaf; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -1700,13 +1715,13 @@ mod benches { // the that path resolves correctly in the generated file. 
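Earlier in this hunk, `BrokerPot` turns the broker `PalletId` into an `AccountId32` junction via `into_account_truncating`. The dependency-free sketch below shows roughly what that derivation amounts to; it is simplified (the real conversion lives in `sp-runtime` and SCALE-encodes a type-id prefix plus the pallet id, padding with trailing zeros) and is editorial rather than part of the patch.

```rust
// Illustrative, simplified sketch of deriving a 32-byte account from an
// 8-byte pallet id: prefix with FRAME's "modl" module tag and zero-pad,
// roughly what `AccountIdConversion::into_account_truncating` produces.
fn pallet_account_id(pallet_id: [u8; 8]) -> [u8; 32] {
    let mut account = [0u8; 32];
    account[..4].copy_from_slice(b"modl");
    account[4..12].copy_from_slice(&pallet_id);
    account
}

fn main() {
    let broker_pot = pallet_account_id(*b"py/broke");
    assert_eq!(&broker_pot[..4], b"modl");
    assert_eq!(&broker_pot[4..12], b"py/broke");
    println!("{:02x?}", broker_pot);
}
```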
[polkadot_runtime_common::assigned_slots, AssignedSlots] [polkadot_runtime_common::auctions, Auctions] - [polkadot_runtime_common::coretime, Coretime] [polkadot_runtime_common::crowdloan, Crowdloan] [polkadot_runtime_common::claims, Claims] [polkadot_runtime_common::identity_migrator, IdentityMigrator] [polkadot_runtime_common::slots, Slots] [polkadot_runtime_common::paras_registrar, Registrar] [polkadot_runtime_parachains::configuration, Configuration] + [polkadot_runtime_parachains::coretime, Coretime] [polkadot_runtime_parachains::hrmp, Hrmp] [polkadot_runtime_parachains::disputes, ParasDisputes] [polkadot_runtime_parachains::inclusion, ParaInclusion] @@ -1764,7 +1779,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1777,11 +1792,11 @@ sp_api::impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1796,7 +1811,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { XcmPallet::dry_run_call::(origin, call) } @@ -1806,6 +1821,18 @@ sp_api::impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationConverter, + >::convert_location(location) + } + } + impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) @@ -2023,7 +2050,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -2033,7 +2060,7 @@ sp_api::impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -2043,7 +2070,7 @@ sp_api::impl_runtime_apis! 
{ ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs index 144e9d5b872382b7381e2d77c6fb08a8fbece4fa..06246ada72f16fb04623f3fc6534265cc1156b79 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index 9f275e7b8cdc24fdcb251ef95beca3eb2b81be25..abcc1893c29b1273bdde09a3f38f12c174bc84cc 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,6 +50,10 @@ pub struct WeightInfo(PhantomData); impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -57,19 +61,23 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ยฑ0)` - // Estimated: `3681 + s * (8 ยฑ0)` - // Minimum execution time: 21_053_000 picoseconds. - Weight::from_parts(17_291_897, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 104 - .saturating_add(Weight::from_parts(18_779, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ยฑ0)` + // Estimated: `3733 + s * (8 ยฑ0)` + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(28_146_882, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 140 + .saturating_add(Weight::from_parts(21_283, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -77,15 +85,15 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. 
fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ยฑ0)` - // Estimated: `3681 + s * (8 ยฑ0)` - // Minimum execution time: 20_843_000 picoseconds. - Weight::from_parts(16_881_986, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 104 - .saturating_add(Weight::from_parts(18_788, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ยฑ0)` + // Estimated: `3733 + s * (8 ยฑ0)` + // Minimum execution time: 28_680_000 picoseconds. + Weight::from_parts(31_024_579, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 119 + .saturating_add(Weight::from_parts(20_989, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs index 0ad32996c49595db1a414e4d0c0fc5961ea2d3e7..b2329c098cead5d2780b6ac44a1d6b5963efcaf8 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for `runtime_parachains::coretime` +//! Autogenerated weights for `runtime_common::coretime` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -45,28 +45,61 @@ use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; -use polkadot_runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; - /// Weight functions for `runtime_common::coretime`. 
pub struct WeightInfo(PhantomData); -impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn request_revenue_at() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `6428` + // Minimum execution time: 36_613_000 picoseconds. + Weight::from_parts(37_637_000, 0) + .saturating_add(Weight::from_parts(0, 6428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn request_core_count() -> Weight { - ::WeightInfo::set_config_with_u32() + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_527_000 picoseconds. + Weight::from_parts(7_784_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `CoreTimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoreTimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CoreTimeAssignmentProvider::CoreSchedules` (r:0 w:1) - /// Proof: `CoreTimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 100]`. fn assign_core(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3541` - // Minimum execution time: 6_275_000 picoseconds. 
- Weight::from_parts(6_883_543, 0) - .saturating_add(Weight::from_parts(0, 3541)) - // Standard Error: 202 - .saturating_add(Weight::from_parts(15_028, 0).saturating_mul(s.into())) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_220_000 picoseconds. + Weight::from_parts(9_905_773, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 257 + .saturating_add(Weight::from_parts(12_400, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index c4d78b1081a626effc136b3a3ee42acd4b514bdf..a66fceedae34158e70be869c1b4262fcde3d7a43 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,68 +11,68 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-session = { 
workspace = true } +sp-version = { workspace = true } +frame-election-provider-support = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-curve = { path = "../../../substrate/frame/staking/reward-curve" } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -test-runtime-constants = { package = "test-runtime-constants", path = "constants", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +frame-executive = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-indices = { workspace = true } +pallet-offences = { workspace = true } +pallet-session = { workspace = true } +frame-support = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +test-runtime-constants = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-sudo = { workspace = true } +pallet-vesting = { workspace = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", 
default-features = false } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-trie = { path = "../../../substrate/primitives/trie" } +hex-literal = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index ed10ece54f67c43a7b8aedfaebfb03452605cfe8..807774be7136961b5b2fa146cca50f4efc666fa7 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -10,11 +10,11 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 8178639946f8df0707ef39e2543a4f4dab8a2c0f..96392c026d5c9059ad55ff4a739f50c021eacbd7 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -53,6 +53,7 @@ use frame_support::{ }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; +use pallet_timestamp::Now; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, @@ -1014,7 +1015,7 @@ sp_api::impl_runtime_apis! { None } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( _equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -1186,7 +1187,7 @@ sp_api::impl_runtime_apis! 
{ impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index ccb8a02b981cc36782c796eae7b13081e0c985de..5a7805c05161be49ae94986b60101594422d4d89 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -11,123 +11,122 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } serde = { workspace = true } serde_derive = { optional = true, workspace = true } -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +binary-merkle-tree = { workspace = true } +sp-inherents = 
{ workspace = true } +sp-offchain = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-std = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } +sp-npos-elections = { workspace = true } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-bags-list = { path = "../../../substrate/frame/bags-list", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-beefy = { path = "../../../substrate/frame/beefy", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } -pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } -pallet-elections-phragmen = { package = "pallet-elections-phragmen", path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false } -pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } -pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } -pallet-nomination-pools = { path = "../../../substrate/frame/nomination-pools", default-features = false } 
-pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } -pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } -pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } -pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-society = { path = "../../../substrate/frame/society", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", path = "../../../substrate/frame/staking/reward-curve" } -pallet-staking-runtime-api = { path = "../../../substrate/frame/staking/runtime-api", default-features = false } -pallet-delegated-staking = { path = "../../../substrate/frame/delegated-staking", default-features = false } -pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-nomination-pools-runtime-api = { path = "../../../substrate/frame/nomination-pools/runtime-api", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } +frame-election-provider-support = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { features = ["experimental", "tuples-96"], workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +westend-runtime-constants = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-bags-list = { workspace = true } +pallet-balances = { workspace = true } +pallet-beefy = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-collective = { workspace = true } +pallet-democracy = { workspace = true } +pallet-elections-phragmen = { workspace = true } +pallet-election-provider-multi-phase = { workspace = true } +pallet-fast-unstake = { workspace = 
true } +pallet-grandpa = { workspace = true } +pallet-identity = { workspace = true } +pallet-indices = { workspace = true } +pallet-membership = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-mmr = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nomination-pools = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-offences = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-recovery = { workspace = true } +pallet-referenda = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-society = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-staking-runtime-api = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +pallet-root-testing = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -pallet-election-provider-support-benchmarking = { path = "../../../substrate/frame/election-provider-support/benchmarking", default-features = false, optional = true } -pallet-nomination-pools-benchmarking = { path = "../../../substrate/frame/nomination-pools/benchmarking", default-features = false, optional = true } -pallet-offences-benchmarking = { path = "../../../substrate/frame/offences/benchmarking", default-features = false, optional = true } -pallet-session-benchmarking = { path = "../../../substrate/frame/session/benchmarking", default-features = false, optional = true } -hex-literal = { version = "0.4.1", optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +pallet-election-provider-support-benchmarking = { optional = true, workspace = true } +pallet-nomination-pools-benchmarking = { optional = true, workspace = true } +pallet-offences-benchmarking = { optional = true, workspace = true } +pallet-session-benchmarking = { optional = true, workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-parachain-primitives = { 
workspace = true } +polkadot-runtime-parachains = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } +hex-literal = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } -tokio = { version = "1.24.2", features = ["macros"] } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } +remote-externalities = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +sp-tracing = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -201,7 +200,6 @@ std = [ "polkadot-primitives/std", "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", - "rustc-hex/std", "scale-info/std", "serde/std", "serde_derive", @@ -230,7 +228,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -288,7 +286,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index d50b168fac52ee12709ece481375f73efdd878ac..f9b99ea5284d3322b6e0bf100f8314000a4b5bce 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -polkadot-runtime-common = { path = "../../common", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { 
workspace = true }
+sp-core = { workspace = true }

-xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false }
-xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false }
+xcm = { workspace = true }
+xcm-builder = { workspace = true }

 [features]
 default = ["std"]
@@ -34,3 +34,6 @@ std = [
   "xcm-builder/std",
   "xcm/std",
 ]
+
+# Set timing constants (e.g. session period) to faster versions to speed up testing.
+fast-runtime = []
diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs
index 58048272e791a06b9169bf2ccf59a5e8d005c2d6..8d66ac2868d0b7a53011b8624c41161860b2cd42 100644
--- a/polkadot/runtime/westend/constants/src/lib.rs
+++ b/polkadot/runtime/westend/constants/src/lib.rs
@@ -116,6 +116,17 @@ pub mod system_parachain {
     /// All system parachains of Westend.
     pub type SystemParachains = IsChildSystemParachain<ParaId>;
+
+    /// Coretime constants
+    pub mod coretime {
+        /// Coretime timeslice period in blocks
+        /// WARNING: This constant is used across chains, so additional care should be taken
+        /// when changing it.
+        #[cfg(feature = "fast-runtime")]
+        pub const TIMESLICE_PERIOD: u32 = 20;
+        #[cfg(not(feature = "fast-runtime"))]
+        pub const TIMESLICE_PERIOD: u32 = 80;
+    }
 }

 /// Westend Treasury pallet instance.
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 2f2e01675563b365fc366274d5f9db26a5fc3e27..576b4bac4553693a18eb87d3aba6d465b1738cfe 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -29,7 +29,7 @@ use frame_support::{
     traits::{
         fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf,
         EitherOfDiverse, EverythingBut, FromContains, InstanceFilter, KeyOwnerProofSystem,
-        LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons,
+        LinearStoragePrice, ProcessMessage, ProcessMessageError, VariantCountOf, WithdrawReasons,
     },
     weights::{ConstantMultiplier, WeightMeter, WeightToFee as _},
     PalletId,
@@ -88,8 +88,8 @@ use sp_runtime::{
     curve::PiecewiseLinear,
     generic, impl_opaque_keys,
     traits::{
-        BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, IdentityLookup,
-        Keccak256, OpaqueKeys, SaturatedConversion, Verify,
+        AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT,
+        IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify,
     },
     transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
     ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill,
@@ -105,7 +105,7 @@ use sp_version::RuntimeVersion;
 use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm};
 use xcm_builder::PayOverXcm;
-use xcm_fee_payment_runtime_api::{
+use xcm_runtime_apis::{
     dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects},
     fees::Error as XcmPaymentApiError,
 };
@@ -122,7 +122,12 @@ use sp_runtime::traits::Get;
 pub use sp_runtime::BuildStorage;

 /// Constant values used within the runtime.
-use westend_runtime_constants::{currency::*, fee::*, system_parachain::BROKER_ID, time::*};
+use westend_runtime_constants::{
+    currency::*,
+    fee::*,
+    system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID},
+    time::*,
+};

 mod bag_thresholds;
 mod weights;
@@ -154,7 +159,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("westend"),
     impl_name: create_runtime_str!("parity-westend"),
     authoring_version: 2,
-    spec_version: 1_012_000,
+    spec_version: 1_014_000,
     impl_version: 0,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 26,
@@ -310,7 +315,7 @@ impl pallet_balances::Config for Runtime {
     type RuntimeHoldReason = RuntimeHoldReason;
     type RuntimeFreezeReason = RuntimeFreezeReason;
     type FreezeIdentifier = RuntimeFreezeReason;
-    type MaxFreezes = ConstU32<1>;
+    type MaxFreezes = VariantCountOf<RuntimeFreezeReason>;
 }

 parameter_types! {
@@ -323,6 +328,7 @@ impl pallet_beefy::Config for Runtime {
     type MaxNominators = MaxNominators;
     type MaxSetIdSessionEntries = BeefySetIdSessionEntries;
     type OnNewValidatorSet = BeefyMmrLeaf;
+    type AncestryHelper = BeefyMmrLeaf;
     type WeightInfo = ();
     type KeyOwnerProof = sp_session::MembershipProof;
     type EquivocationReportSystem =
@@ -658,9 +664,6 @@ impl pallet_fast_unstake::Config for Runtime {
 }

 parameter_types! {
-    pub const ProposalBond: Permill = Permill::from_percent(5);
-    pub const ProposalBondMinimum: Balance = 2000 * CENTS;
-    pub const ProposalBondMaximum: Balance = 1 * GRAND;
     pub const SpendPeriod: BlockNumber = 6 * DAYS;
     pub const Burn: Permill = Permill::from_perthousand(2);
     pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
@@ -683,13 +686,8 @@ parameter_types! {
 impl pallet_treasury::Config for Runtime {
     type PalletId = TreasuryPalletId;
     type Currency = Balances;
-    type ApproveOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>;
     type RejectOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>;
     type RuntimeEvent = RuntimeEvent;
-    type OnSlash = Treasury;
-    type ProposalBond = ProposalBond;
-    type ProposalBondMinimum = ProposalBondMinimum;
-    type ProposalBondMaximum = ProposalBondMaximum;
     type SpendPeriod = SpendPeriod;
     type Burn = Burn;
     type BurnDestination = ();
@@ -1007,8 +1005,7 @@ impl InstanceFilter<RuntimeCall> for ProxyType {
             matches!(
                 c,
                 RuntimeCall::Staking(..) |
-                    RuntimeCall::Session(..) |
-                    RuntimeCall::Utility(..) |
+                    RuntimeCall::Session(..) | RuntimeCall::Utility(..) |
                     RuntimeCall::FastUnstake(..) |
                     RuntimeCall::VoterList(..) |
                     RuntimeCall::NominationPools(..)
@@ -1191,21 +1188,39 @@ impl parachains_scheduler::Config for Runtime {
 parameter_types! {
     pub const BrokerId: u32 = BROKER_ID;
+    pub const BrokerPalletId: PalletId = PalletId(*b"py/broke");
     pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000);
 }

+pub struct BrokerPot;
+impl Get<InteriorLocation> for BrokerPot {
+    fn get() -> InteriorLocation {
+        Junction::AccountId32 { network: None, id: BrokerPalletId::get().into_account_truncating() }
+            .into()
+    }
+}
+
 impl coretime::Config for Runtime {
     type RuntimeOrigin = RuntimeOrigin;
     type RuntimeEvent = RuntimeEvent;
     type Currency = Balances;
     type BrokerId = BrokerId;
+    type BrokerPotLocation = BrokerPot;
     type WeightInfo = weights::runtime_parachains_coretime::WeightInfo<Runtime>;
     type SendXcm = crate::xcm_config::XcmRouter;
+    type AssetTransactor = crate::xcm_config::LocalAssetTransactor;
+    type AccountToLocation = xcm_builder::AliasesIntoAccountId32<
+        xcm_config::ThisNetwork,
+        <Runtime as frame_system::Config>::AccountId,
+    >;
     type MaxXcmTransactWeight = MaxXcmTransactWeight;
 }

 parameter_types!
{ pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * TIMESLICE_PERIOD; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_assigner_on_demand::Config for Runtime { @@ -1213,6 +1228,8 @@ impl parachains_assigner_on_demand::Config for Runtime { type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl parachains_assigner_coretime::Config for Runtime {} @@ -1990,6 +2007,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -1999,7 +2017,7 @@ sp_api::impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -2009,7 +2027,7 @@ sp_api::impl_runtime_apis! { ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) @@ -2227,7 +2245,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -2240,11 +2258,11 @@ sp_api::impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -2259,7 +2277,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { XcmPallet::dry_run_call::(origin, call) } @@ -2269,6 +2287,18 @@ sp_api::impl_runtime_apis! 
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationConverter, + >::convert_location(location) + } + } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< Block, AccountId, diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs index 144e9d5b872382b7381e2d77c6fb08a8fbece4fa..06246ada72f16fb04623f3fc6534265cc1156b79 100644 --- a/polkadot/runtime/westend/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. 
- Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index 8b046f5d34ad7d3f0800b6d48f2cbb83cd5ab010..1bd9fa31b81b29fc581475991115a86c7e4ecf5d 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,6 +50,10 @@ pub struct WeightInfo(PhantomData); impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -57,19 +61,23 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ยฑ0)` - // Estimated: `3681 + s * (8 ยฑ0)` - // Minimum execution time: 21_396_000 picoseconds. - Weight::from_parts(20_585_695, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 127 - .saturating_add(Weight::from_parts(20_951, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ยฑ0)` + // Estimated: `3733 + s * (8 ยฑ0)` + // Minimum execution time: 29_427_000 picoseconds. 
+ Weight::from_parts(26_756_913, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 121 + .saturating_add(Weight::from_parts(20_849, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -77,15 +85,15 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ยฑ0)` - // Estimated: `3681 + s * (8 ยฑ0)` - // Minimum execution time: 21_412_000 picoseconds. - Weight::from_parts(19_731_554, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 128 - .saturating_add(Weight::from_parts(21_055, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ยฑ0)` + // Estimated: `3733 + s * (8 ยฑ0)` + // Minimum execution time: 29_329_000 picoseconds. + Weight::from_parts(26_415_340, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 129 + .saturating_add(Weight::from_parts(20_909, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs index 443651a6fda44380d89b02c53dc70338bdc7f4ee..9df382875f5f12ddee62751eeca5feb16dfece41 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::coretime` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,6 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::coretime`. 
pub struct WeightInfo(PhantomData); impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn request_revenue_at() -> Weight { + // Proof Size summary in bytes: + // Measured: `2930` + // Estimated: `6395` + // Minimum execution time: 34_947_000 picoseconds. + Weight::from_parts(35_550_000, 0) + .saturating_add(Weight::from_parts(0, 6395)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } /// Storage: `Configuration::PendingConfigs` (r:1 w:1) /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) @@ -58,8 +80,8 @@ impl polkadot_runtime_parachains::coretime::WeightInfo // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_486_000 picoseconds. - Weight::from_parts(7_889_000, 0) + // Minimum execution time: 7_519_000 picoseconds. + Weight::from_parts(7_803_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -73,11 +95,11 @@ impl polkadot_runtime_parachains::coretime::WeightInfo // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 9_409_000 picoseconds. - Weight::from_parts(10_177_115, 0) + // Minimum execution time: 9_697_000 picoseconds. 
+ Weight::from_parts(10_610_219, 0) .saturating_add(Weight::from_parts(0, 3612)) - // Standard Error: 259 - .saturating_add(Weight::from_parts(13_932, 0).saturating_mul(s.into())) + // Standard Error: 732 + .saturating_add(Weight::from_parts(10_364, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 7181afd9989ec9e0b00d1335bf35f2c5f2441836..53ea0b74463bc2466df2ff37f190f429d9b2973a 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -10,7 +10,7 @@ description = "Stores messages other authorities issue about candidates in Polka workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-core = { path = "../../substrate/primitives/core" } -polkadot-primitives = { path = "../primitives" } -gum = { package = "tracing-gum", path = "../node/gum" } +codec = { features = ["derive"], workspace = true } +sp-core = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index ad6d7259d2483e11e9df8c121bab75461a71c525..16205b0f51f57a62b923eff0952a45795c6cb3b6 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -10,9 +10,9 @@ description = "CLI to generate voter bags for Polkadot runtimes" workspace = true [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } -generate-bags = { path = "../../../substrate/utils/frame/generate-bags" } -sp-io = { path = "../../../substrate/primitives/io" } +generate-bags = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } -westend-runtime = { path = "../../runtime/westend" } +westend-runtime = { workspace = true } diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index 20e4130f888bcb1f826dab81fb7e5a721a53a225..206ca8cf19a90df9bc2becd0c65039f35d714191 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -10,14 +10,14 @@ license.workspace = true workspace = true [dependencies] -westend-runtime = { path = "../../../runtime/westend" } -westend-runtime-constants = { path = "../../../runtime/westend/constants" } +westend-runtime = { workspace = true } +westend-runtime-constants = { workspace = true, default-features = true } -pallet-bags-list-remote-tests = { path = "../../../../substrate/frame/bags-list/remote-tests" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -frame-system = { path = "../../../../substrate/frame/system" } -sp-core = { path = "../../../../substrate/primitives/core" } +pallet-bags-list-remote-tests = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], 
workspace = true, default-features = true } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 690fb377dad78a8f6fb6d946fabab1ce68ba91df..72174bda2340c46c7769c320c646c11556daf74c 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -array-bytes = "6.2.2" -bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] } -derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } -impl-trait-for-tuples = "0.2.2" +array-bytes = { workspace = true, default-features = true } +bounded-collections = { features = ["serde"], workspace = true } +derivative = { features = ["use_core"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-weights = { features = ["serde"], workspace = true } serde = { features = ["alloc", "derive", "rc"], workspace = true } -schemars = { version = "0.8.13", default-features = true, optional = true } -xcm-procedural = { path = "procedural" } -environmental = { version = "1.1.4", default-features = false } +schemars = { default-features = true, optional = true, workspace = true } +xcm-procedural = { workspace = true, default-features = true } +environmental = { workspace = true } [dev-dependencies] -sp-io = { path = "../../substrate/primitives/io" } -hex = "0.4.3" -hex-literal = "0.4.1" +sp-io = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml index 9820bd36dc0b1fb84fb6c4e43e4b1608308432a2..9d8f4c0a6430b71e2b2d1ba09719a8c397b4e4d5 100644 --- a/polkadot/xcm/docs/Cargo.toml +++ b/polkadot/xcm/docs/Cargo.toml @@ -10,30 +10,30 @@ publish = false [dependencies] # For XCM stuff -xcm = { path = "../../xcm", package = "staging-xcm" } -xcm-executor = { path = "../../xcm/xcm-executor", package = "staging-xcm-executor" } -xcm-builder = { path = "../../xcm/xcm-builder", package = "staging-xcm-builder" } -xcm-simulator = { path = "../../xcm/xcm-simulator" } -pallet-xcm = { path = "../../xcm/pallet-xcm" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } # For building FRAME runtimes -frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = ["experimental", "runtime"] } -codec = { package = "parity-scale-codec", version = "3.6.9" } -scale-info = { version = "2.6.0", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -sp-runtime = { path = 
"../../../substrate/primitives/runtime" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-io = { path = "../../../substrate/primitives/io" } +frame = { features = ["experimental", "runtime"], workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +scale-info = { workspace = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } # Some pallets -pallet-message-queue = { path = "../../../substrate/frame/message-queue" } -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-message-queue = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # For building docs simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } -docify = "0.2.6" +docify = { workspace = true } [dev-dependencies] -test-log = "0.2.14" +test-log = { workspace = true } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs index e3fdda2e733376ca3eb780acbc17256c12e6acac..e7d00ac71038b4f02d5172584a398f185864f46c 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs @@ -36,7 +36,7 @@ construct_runtime! { } } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountId = AccountId; @@ -49,8 +49,7 @@ impl mock_message_queue::Config for Runtime { type XcmExecutor = XcmExecutor; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; type AccountStore = System; } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs index 25c35dd4aaa83c12b0176ea528d09f7d84bc87ce..686f86b37b73204e877ab3c935797ce92540816e 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -36,7 +36,7 @@ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = IdentityLookup; @@ -44,7 +44,7 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type AccountStore = System; } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 8bf3b9abf66349834b3c2b7d7cd367e4b0b835d8..1177d094c6c38a1a3efe4f7bd76bde715d1a2ffd 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -13,29 +13,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +xcm-executor = { workspace = true } +frame-benchmarking = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } log = { workspace = true, default-features = true } [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-assets = { path = "../../../substrate/frame/assets" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -xcm = { package = "staging-xcm", path = ".." 
} +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } # temp -pallet-xcm = { path = "../pallet-xcm" } -polkadot-runtime-common = { path = "../../runtime/common" } +pallet-xcm = { workspace = true, default-features = true } +polkadot-runtime-common = { workspace = true, default-features = true } # westend-runtime = { path = "../../runtime/westend", features = ["runtime-benchmarks"] } -polkadot-primitives = { path = "../../primitives" } +polkadot-primitives = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 6f9b389ab6f12db50c27fab0f5f20961c7f171c6..0cce7a3449389875a7658f20e4012640e13cd394 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -10,32 +10,32 @@ license.workspace = true workspace = true [dependencies] -bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bounded-collections = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } # marked optional, used in benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-balances = { optional = true, workspace = true } [dev-dependencies] -pallet-assets = { path = "../../../substrate/frame/assets" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../parachain" } +pallet-assets = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace 
= true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = ["std"] @@ -55,7 +55,7 @@ std = [ "sp-std/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -69,7 +69,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8f67e6e7d949693e25556af272ea73e443b620df..668f07c52ce35e8009d33abc3f1d021e07f85394 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -64,7 +64,7 @@ use xcm_executor::{ }, AssetsInHolding, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -1376,7 +1376,7 @@ pub mod pallet { /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the /// fee on the `dest` (and possibly reserve) chains. /// - `assets_transfer_type`: The XCM `TransferType` used to transfer the `assets`. - /// - `remote_fees_id`: One of the included `assets` to be be used to pay fees. + /// - `remote_fees_id`: One of the included `assets` to be used to pay fees. /// - `fees_transfer_type`: The XCM `TransferType` used to transfer the `fees` assets. /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the /// transfer, which also determines what happens to the assets on the destination chain. @@ -2442,7 +2442,7 @@ impl Pallet { /// /// Returns not only the call result and events, but also the local XCM, if any, /// and any XCMs forwarded to other locations. - /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + /// Meant to be used in the `xcm_runtime_apis::dry_run::DryRunApi` runtime API. pub fn dry_run_call( origin: OriginCaller, call: RuntimeCall, @@ -2474,7 +2474,7 @@ impl Pallet { /// Dry-runs `xcm` with the given `origin_location`. /// /// Returns execution result, events, and any forwarded XCMs to other locations. - /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + /// Meant to be used in the `xcm_runtime_apis::dry_run::DryRunApi` runtime API. pub fn dry_run_xcm( origin_location: VersionedLocation, xcm: VersionedXcm, diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index ead98e1d046005360743b921fe3d70cd93c06a1b..2be6f301f85621489ab6986aaf300251fd239fbc 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -266,24 +266,13 @@ impl frame_system::Config for Test { parameter_types! 
{ pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index ca9fb351bd3cad1f805106e405cfdcc496c9d8a8..a7db183bcdbf89b2b642f2eb8eea9f87019f086d 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -14,11 +14,11 @@ workspace = true proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } -Inflector = "0.11.4" +Inflector = { workspace = true } [dev-dependencies] -trybuild = { version = "1.0.88", features = ["diff"] } -xcm = { package = "staging-xcm", path = ".." } +trybuild = { features = ["diff"], workspace = true } +xcm = { workspace = true, default-features = true } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 0a33d52580fca02166990c72258fa77bcfb21d45..09ead1389d19da15f167793b041b9ef849c0e741 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -233,6 +233,32 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum + .variants + .iter() + .filter(|variant| variant.ident == "ClearOrigin") + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => { + quote! 
{ + #(#docs)* + pub fn #method_name(mut self) -> XcmBuilder { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), + }; + Ok(method) + }) + .collect::, _>>()?; + // Then we require fees to be paid let buy_execution_method = data_enum .variants @@ -276,6 +302,7 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result XcmBuilder { + #(#allowed_after_load_holding_methods)* #buy_execution_method } }; diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 96b16fb7e4565c2e3e891ccf8ee3ac2bedc68b0b..4202309bf3f71cb6a6a81968ad19eea2cc999515 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -79,3 +79,24 @@ fn default_builder_requires_buy_execution() { ]) ); } + +#[test] +fn default_builder_allows_clear_origin_before_buy_execution() { + let asset: Asset = (Here, 100u128).into(); + let beneficiary: Location = [0u8; 32].into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone()) + .clear_origin() + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone(), beneficiary.clone()) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + ClearOrigin, + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 38e55d0ea51e5844640b2e9613f830f7cd636e7f..1afc120f500c65253e462762e2e69e6e3338210a 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -62,7 +62,10 @@ use super::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{ConstU32, WeakBoundedVec}; -use codec::{self, Decode, Encode, MaxEncodedLen}; +use codec::{ + self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, + MaxEncodedLen, +}; use core::{fmt::Debug, result}; use derivative::Derivative; use scale_info::TypeInfo; @@ -237,7 +240,7 @@ pub enum BodyPart { #[codec(compact)] denom: u32, }, - /// More than than the given proportion of members of the body. + /// More than the given proportion of members of the body. MoreThanProportion { #[codec(compact)] nom: u32, @@ -278,7 +281,7 @@ pub const VERSION: super::Version = 2; pub type QueryId = u64; /// DEPRECATED. Please use XCMv3 or XCMv4 instead. 
-#[derive(Derivative, Default, Encode, Decode, TypeInfo)]
+#[derive(Derivative, Default, Encode, TypeInfo)]
 #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))]
 #[codec(encode_bound())]
 #[codec(decode_bound())]
@@ -286,6 +289,31 @@ pub type QueryId = u64;
 #[scale_info(replace_segment("staging_xcm", "xcm"))]
 pub struct Xcm<Call>(pub Vec<Instruction<Call>>);

+environmental::environmental!(instructions_count: u8);
+
+impl<Call> Decode for Xcm<Call> {
+    fn decode<I: CodecInput>(input: &mut I) -> core::result::Result<Self, CodecError> {
+        instructions_count::using_once(&mut 0, || {
+            let number_of_instructions: u32 = <Compact<u32>>::decode(input)?.into();
+            instructions_count::with(|count| {
+                *count = count.saturating_add(number_of_instructions as u8);
+                if *count > MAX_INSTRUCTIONS_TO_DECODE {
+                    return Err(CodecError::from("Max instructions exceeded"))
+                }
+                Ok(())
+            })
+            .unwrap_or(Ok(()))?;
+            let decoded_instructions = decode_vec_with_len(input, number_of_instructions as usize)?;
+            Ok(Self(decoded_instructions))
+        })
+    }
+}
+
+/// The maximal number of instructions in an XCM before decoding fails.
+///
+/// This is a deliberate limit - not a technical one.
+pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100;
+
 impl<Call> Xcm<Call> {
     /// Create an empty instance.
     pub fn new() -> Self {
@@ -1157,3 +1185,38 @@ impl<Call> TryFrom<NewInstruction<Call>> for Instruction<Call> {
         })
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::{prelude::*, *};
+
+    #[test]
+    fn decoding_respects_limit() {
+        let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]);
+        let encoded = max_xcm.encode();
+        assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok());
+
+        let big_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]);
+        let encoded = big_xcm.encode();
+        assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err());
+
+        let nested_xcm = Xcm::<()>(vec![
+            DepositReserveAsset {
+                assets: All.into(),
+                dest: Here.into(),
+                xcm: max_xcm,
+                max_assets: 1,
+            };
+            (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize
+        ]);
+        let encoded = nested_xcm.encode();
+        assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err());
+
+        let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]);
+        let encoded = even_more_nested_xcm.encode();
+        assert_eq!(encoded.len(), 345730);
+        // This should not decode since the limit is 100
+        assert_eq!(MAX_INSTRUCTIONS_TO_DECODE, 100, "precondition");
+        assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err());
+    }
+}
diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs
index aea4e03725159bcd08233353c60de6cde6d6b9ed..24348bf2e67213e9396a3120dfab8816695ba497 100644
--- a/polkadot/xcm/src/v3/junction.rs
+++ b/polkadot/xcm/src/v3/junction.rs
@@ -241,7 +241,7 @@ pub enum BodyPart {
         #[codec(compact)]
         denom: u32,
     },
-    /// More than than the given proportion of members of the body.
+    /// More than the given proportion of members of the body.
MoreThanProportion { #[codec(compact)] nom: u32, diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 79c601b98b4fd012202a8a4de00b2d254a7c8023..d43506aa651b4d719c42b8633649936036b04741 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -10,34 +10,34 @@ version = "7.0.0" workspace = true [dependencies] -impl-trait-for-tuples = "0.2.1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +impl-trait-for-tuples = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +sp-std = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } log = { workspace = true } # Polkadot dependencies -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } +polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -primitive-types = "0.12.1" -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-xcm = { path = "../pallet-xcm" } -pallet-salary = { path = "../../../substrate/frame/salary" } -pallet-assets = { path = "../../../substrate/frame/assets" } -polkadot-primitives = { path = "../../primitives" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -assert_matches = "1.5.0" -polkadot-test-runtime = { path = "../../runtime/test-runtime" } +primitive-types = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +pallet-salary = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +assert_matches = { workspace = true } +polkadot-test-runtime = { workspace = true } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 10e9f4c6c0855a82dce07949cc53e2950971047b..18bde3aab485a9660892ab1f5dc7c3de4b508338 100644 --- 
a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -299,6 +299,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { (1, TreasuryAccountId::get(), INITIAL_BALANCE), (100, TreasuryAccountId::get(), INITIAL_BALANCE), ], + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 62b448a9f430c106516dd4f134ae38d9fc5db47b..582d596b78f1092397dc821dd9152f4295a350fd 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -17,7 +17,7 @@ use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing}, + traits::{Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; @@ -102,24 +102,14 @@ impl frame_system::Config for Runtime { parameter_types! { pub ExistentialDeposit: Balance = 1 * CENTS; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 3b30b4f13e2dd498945767a6e6d4c87a43333bc1..7d1fc48692f9ac5bbf7d022ef4e0e718a82d257f 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -10,20 +10,20 @@ version = "7.0.0" workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -environmental = { version = "1.1.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -xcm = { package = "staging-xcm", path = "..", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -log = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +impl-trait-for-tuples = { workspace = true } +environmental = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +xcm = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace 
= true } +frame-support = { workspace = true } +tracing = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [features] default = ["std"] @@ -37,7 +37,6 @@ std = [ "environmental/std", "frame-benchmarking/std", "frame-support/std", - "log/std", "scale-info/std", "sp-arithmetic/std", "sp-core/std", @@ -45,5 +44,6 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-weights/std", + "tracing/std", "xcm/std", ] diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 37c2117e7b06fc62f33e71c3c57164caf26d4c9b..e669e5d2b2312fbba56a86a6b8518506ba350543 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -11,24 +11,24 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system" } -futures = "0.3.30" -pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-test-client = { path = "../../../node/test/client" } -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -polkadot-test-service = { path = "../../../node/test/service" } -polkadot-service = { path = "../../../node/service" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -xcm = { package = "staging-xcm", path = "../..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = ".." 
} -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core" } +codec = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true, default-features = true } +futures = { workspace = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-test-client = { workspace = true } +polkadot-test-runtime = { workspace = true } +polkadot-test-service = { workspace = true } +polkadot-service = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index da9de93ca0f6f1a94f388d9078e95305e5c210e9..aeeb24dcf81243f317a43886a8ddd314d6de6300 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -208,9 +208,12 @@ impl ExecuteXcm for XcmExecutor Outcome { let origin = origin.into(); - log::trace!( + tracing::trace!( target: "xcm::execute", - "origin: {origin:?}, message: {message:?}, weight_credit: {weight_credit:?}", + ?origin, + ?message, + ?weight_credit, + "Executing message", ); let mut properties = Properties { weight_credit, message_id: None }; @@ -226,10 +229,13 @@ impl ExecuteXcm for XcmExecutor ExecuteXcm for XcmExecutor for frame_benchmarking::BenchmarkError { fn from(error: ExecutorError) -> Self { - log::error!( - "XCM ERROR >> Index: {:?}, Error: {:?}, Weight: {:?}", - error.index, - error.xcm_error, - error.weight + tracing::error!( + index = ?error.index, + xcm_error = ?error.xcm_error, + weight = ?error.weight, + "XCM ERROR", ); Self::Stop("xcm executor error: see error logs") } @@ -326,10 +332,12 @@ impl XcmExecutor { let mut weight_used = xcm_weight.saturating_sub(self.total_surplus); if !self.holding.is_empty() { - log::trace!( + tracing::trace!( target: "xcm::post_process", - "Trapping assets in holding register: {:?}, context: {:?} (original_origin: {:?})", - self.holding, self.context, self.original_origin, + holding_register = ?self.holding, + context = ?self.context, + original_origin = ?self.original_origin, + "Trapping assets in holding register", ); let effective_origin = self.context.origin.as_ref().unwrap_or(&self.original_origin); let trap_weight = @@ -342,7 +350,13 @@ impl XcmExecutor { // TODO: #2841 #REALWEIGHT We should deduct the cost of any instructions following // the error which didn't end up being executed. 
Some((_i, e)) => { - log::trace!(target: "xcm::post_process", "Execution errored at {:?}: {:?} (original_origin: {:?})", _i, e, self.original_origin); + tracing::trace!( + target: "xcm::post_process", + instruction = ?_i, + error = ?e, + original_origin = ?self.original_origin, + "Execution failed", + ); Outcome::Incomplete { used: weight_used, error: e } }, } @@ -363,8 +377,12 @@ impl XcmExecutor { msg: Xcm<()>, reason: FeeReason, ) -> Result { - log::trace!( - target: "xcm::send", "Sending msg: {msg:?}, to destination: {dest:?}, (reason: {reason:?})" + tracing::trace!( + target: "xcm::send", + ?msg, + destination = ?dest, + reason = ?reason, + "Sending msg", ); let (ticket, fee) = validate_send::(dest, msg)?; self.take_fee(fee, reason)?; @@ -400,7 +418,12 @@ impl XcmExecutor { // `holding_limit` items (which has a best case outcome of holding.len() == holding_limit), // then the operation is guaranteed to succeed. let worst_case_holding_len = self.holding.len() + assets_length; - log::trace!(target: "xcm::ensure_can_subsume_assets", "worst_case_holding_len: {:?}, holding_limit: {:?}", worst_case_holding_len, self.holding_limit); + tracing::trace!( + target: "xcm::ensure_can_subsume_assets", + ?worst_case_holding_len, + holding_limit = ?self.holding_limit, + "Ensuring subsume assets work", + ); ensure!(worst_case_holding_len <= self.holding_limit * 2, XcmError::HoldingWouldOverflow); Ok(()) } @@ -408,12 +431,12 @@ impl XcmExecutor { /// Refund any unused weight. fn refund_surplus(&mut self) -> Result<(), XcmError> { let current_surplus = self.total_surplus.saturating_sub(self.total_refunded); - log::trace!( + tracing::trace!( target: "xcm::refund_surplus", - "total_surplus: {:?}, total_refunded: {:?}, current_surplus: {:?}", - self.total_surplus, - self.total_refunded, - current_surplus, + total_surplus = ?self.total_surplus, + total_refunded = ?self.total_refunded, + ?current_surplus, + "Refunding surplus", ); if current_surplus.any_gt(Weight::zero()) { if let Some(w) = self.trader.refund_weight(current_surplus, &self.context) { @@ -426,7 +449,7 @@ impl XcmExecutor { .defensive_proof( "refund_weight returned an asset capable of buying weight; qed", ); - log::error!( + tracing::error!( target: "xcm::refund_surplus", "error: HoldingWouldOverflow", ); @@ -436,10 +459,9 @@ impl XcmExecutor { self.holding.subsume_assets(w.into()); } } - log::trace!( + tracing::trace!( target: "xcm::refund_surplus", - "total_refunded: {:?}", - self.total_refunded, + total_refunded = ?self.total_refunded, ); Ok(()) } @@ -448,13 +470,13 @@ impl XcmExecutor { if Config::FeeManager::is_waived(self.origin_ref(), reason.clone()) { return Ok(()) } - log::trace!( + tracing::trace!( target: "xcm::fees", - "taking fee: {:?} from origin_ref: {:?} in fees_mode: {:?} for a reason: {:?}", - fee, - self.origin_ref(), - self.fees_mode, - reason, + ?fee, + origin_ref = ?self.origin_ref(), + fees_mode = ?self.fees_mode, + ?reason, + "Taking fees", ); let paid = if self.fees_mode.jit_withdraw { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; @@ -507,7 +529,7 @@ impl XcmExecutor { let reanchor_context = Config::UniversalLocation::get(); let reanchored = reanchorable.reanchored(&destination, &reanchor_context).map_err(|error| { - log::error!(target: "xcm::reanchor", "Failed reanchoring with error {error:?}"); + tracing::error!(target: "xcm::reanchor", ?error, "Failed reanchoring with error"); XcmError::ReanchorFailed })?; Ok((reanchored, reanchor_context)) @@ -530,13 +552,12 @@ impl XcmExecutor { } fn process(&mut 
self, xcm: Xcm) -> Result<(), ExecutorError> { - log::trace!( + tracing::trace!( target: "xcm::process", - "origin: {:?}, total_surplus/refunded: {:?}/{:?}, error_handler_weight: {:?}", - self.origin_ref(), - self.total_surplus, - self.total_refunded, - self.error_handler_weight, + origin = ?self.origin_ref(), + total_surplus = ?self.total_surplus, + total_refunded = ?self.total_refunded, + error_handler_weight = ?self.error_handler_weight, ); let mut result = Ok(()); for (i, instr) in xcm.0.into_iter().enumerate() { @@ -566,7 +587,7 @@ impl XcmExecutor { self.process_instruction(instr) }); if let Err(e) = inst_res { - log::trace!(target: "xcm::execute", "!!! ERROR: {:?}", e); + tracing::trace!(target: "xcm::execute", "!!! ERROR: {:?}", e); *r = Err(ExecutorError { index: i as u32, xcm_error: e, @@ -588,11 +609,12 @@ impl XcmExecutor { &mut self, instr: Instruction, ) -> Result<(), XcmError> { - log::trace!( + tracing::trace!( target: "xcm::process_instruction", - "=== {:?}", - instr + instruction = ?instr, + "Processing instruction", ); + match instr { WithdrawAsset(assets) => { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; @@ -694,7 +716,7 @@ impl XcmExecutor { Transact { origin_kind, require_weight_at_most, mut call } => { // We assume that the Relay-chain is allowed to use transact on this parachain. let origin = self.cloned_origin().ok_or_else(|| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "No origin provided", ); @@ -704,7 +726,7 @@ impl XcmExecutor { // TODO: #2841 #TRANSACTFILTER allow the trait to issue filters for the relay-chain let message_call = call.take_decoded().map_err(|_| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "Failed to decode call", ); @@ -712,13 +734,14 @@ impl XcmExecutor { XcmError::FailedToDecode })?; - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Processing call: {message_call:?}", + ?call, + "Processing call", ); if !Config::SafeCallFilter::contains(&message_call) { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "Call filtered by `SafeCallFilter`", ); @@ -729,26 +752,31 @@ impl XcmExecutor { let dispatch_origin = Config::OriginConverter::convert_origin(origin.clone(), origin_kind).map_err( |_| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Failed to convert origin {origin:?} and origin kind {origin_kind:?} to a local origin." + ?origin, + ?origin_kind, + "Failed to convert origin to a local origin." 
); XcmError::BadOrigin }, )?; - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatching with origin: {dispatch_origin:?}", + origin = ?dispatch_origin, + "Dispatching with origin", ); let weight = message_call.get_dispatch_info().weight; if !weight.all_lte(require_weight_at_most) { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Max {weight} bigger than require at most {require_weight_at_most}", + %weight, + %require_weight_at_most, + "Max weight bigger than require at most", ); return Err(XcmError::MaxWeightInvalid) @@ -757,17 +785,19 @@ impl XcmExecutor { let maybe_actual_weight = match Config::CallDispatcher::dispatch(message_call, dispatch_origin) { Ok(post_info) => { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatch successful: {post_info:?}" + ?post_info, + "Dispatch successful" ); self.transact_status = MaybeErrorCode::Success; post_info.actual_weight }, Err(error_and_info) => { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatch failed {error_and_info:?}" + ?error_and_info, + "Dispatch failed" ); self.transact_status = error_and_info.error.encode().into(); diff --git a/polkadot/xcm/xcm-executor/src/traits/conversion.rs b/polkadot/xcm/xcm-executor/src/traits/conversion.rs index 9e2f4c83997ac2b370536822454bcfbea41c4896..1efe138b8c2116ff50e385943d787817173d6b95 100644 --- a/polkadot/xcm/xcm-executor/src/traits/conversion.rs +++ b/polkadot/xcm/xcm-executor/src/traits/conversion.rs @@ -88,19 +88,45 @@ pub trait ConvertOrigin { #[impl_trait_for_tuples::impl_for_tuples(30)] impl ConvertOrigin for Tuple { fn convert_origin(origin: impl Into, kind: OriginKind) -> Result { + let origin = origin.into(); + + tracing::trace!( + target: "xcm::convert_origin", + ?origin, + ?kind, + "Converting origin", + ); + for_tuples!( #( + let convert_origin = core::any::type_name::(); + let origin = match Tuple::convert_origin(origin, kind) { - Err(o) => o, - r => return r + Err(o) => { + tracing::trace!( + target: "xcm::convert_origin", + %convert_origin, + "Convert origin step failed", + ); + + o + }, + Ok(o) => { + tracing::trace!( + target: "xcm::convert_origin", + %convert_origin, + "Convert origin step succeeded", + ); + + return Ok(o) + } }; )* ); - let origin = origin.into(); - log::trace!( + + tracing::trace!( target: "xcm::convert_origin", - "could not convert: origin: {:?}, kind: {:?}", - origin, - kind, + "Converting origin failed", ); + Err(origin) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs index e76d56bfe6164d16487f65ed79c1a1a58a9949fe..2e3acf5960ec1c4759945927ab37d8af2a35d279 100644 --- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs +++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs @@ -59,19 +59,35 @@ impl ShouldExecute for Tuple { properties: &mut Properties, ) -> Result<(), ProcessMessageError> { for_tuples!( #( - match Tuple::should_execute(origin, instructions, max_weight, properties) { - Ok(()) => return Ok(()), - _ => (), + let barrier = core::any::type_name::(); + match Tuple::should_execute(origin, instructions, max_weight, properties) { + Ok(()) => { + tracing::trace!( + target: "xcm::should_execute", + ?origin, + ?instructions, + ?max_weight, + ?properties, + %barrier, + "pass barrier", + ); + return Ok(()) + }, + Err(error) => { + tracing::trace!( + target: "xcm::should_execute", + ?origin, + ?instructions, + ?max_weight, + 
?properties, + ?error, + %barrier, + "did not pass barrier", + ); + }, } )* ); - log::trace!( - target: "xcm::should_execute", - "did not pass barrier: origin: {:?}, instructions: {:?}, max_weight: {:?}, properties: {:?}", - origin, - instructions, - max_weight, - properties, - ); + Err(ProcessMessageError::Unsupported) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/token_matching.rs b/polkadot/xcm/xcm-executor/src/traits/token_matching.rs index e9a7e3ad845daf2f3f0a8da05c7d9d3d3711a291..45f3dc062a6d0a65b2c1b7e2007b4b5250f263bf 100644 --- a/polkadot/xcm/xcm-executor/src/traits/token_matching.rs +++ b/polkadot/xcm/xcm-executor/src/traits/token_matching.rs @@ -27,7 +27,7 @@ impl MatchesFungible for Tuple { for_tuples!( #( match Tuple::matches_fungible(a) { o @ Some(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_fungible", "did not match fungible asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_fungible", asset = ?a, "did not match fungible asset"); None } } @@ -42,7 +42,7 @@ impl MatchesNonFungible for Tuple { for_tuples!( #( match Tuple::matches_nonfungible(a) { o @ Some(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_non_fungible", "did not match non-fungible asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_non_fungible", asset = ?a, "did not match non-fungible asset"); None } } @@ -86,7 +86,7 @@ impl MatchesFungibles for Tuple { for_tuples!( #( match Tuple::matches_fungibles(a) { o @ Ok(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_fungibles", "did not match fungibles asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_fungibles", asset = ?a, "did not match fungibles asset"); Err(Error::AssetNotHandled) } } @@ -101,7 +101,7 @@ impl MatchesNonFungibles for Tuple { for_tuples!( #( match Tuple::matches_nonfungibles(a) { o @ Ok(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_non_fungibles", "did not match fungibles asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_non_fungibles", asset = ?a, "did not match fungibles asset"); Err(Error::AssetNotHandled) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs index e8a52d8256851b4baf9565ba5ddeb47ae28667a7..eb85af0b80f30e489d3fd4e5189db0c94fa429b1 100644 --- a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs @@ -148,12 +148,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::can_check_in", - "asset not found: what: {:?}, origin: {:?}, context: {:?}", - what, - origin, - context, + ?what, + ?origin, + ?context, + "asset not found", ); Err(XcmError::AssetNotFound) } @@ -171,12 +171,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::can_check_out", - "asset not found: what: {:?}, dest: {:?}, context: {:?}", - what, - dest, - context, + ?what, + ?dest, + ?context, + "asset not found", ); Err(XcmError::AssetNotFound) } @@ -194,12 +194,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::deposit_asset", - "did not deposit asset: what: {:?}, who: {:?}, context: {:?}", - what, - who, - context, + ?what, + ?who, + ?context, + "did not deposit asset", ); Err(XcmError::AssetNotFound) } @@ -215,12 +215,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + 
tracing::trace!( target: "xcm::TransactAsset::withdraw_asset", - "did not withdraw asset: what: {:?}, who: {:?}, maybe_context: {:?}", - what, - who, - maybe_context, + ?what, + ?who, + ?maybe_context, + "did not withdraw asset", ); Err(XcmError::AssetNotFound) } @@ -237,13 +237,13 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::internal_transfer_asset", - "did not transfer asset: what: {:?}, from: {:?}, to: {:?}, context: {:?}", - what, - from, - to, - context, + ?what, + ?from, + ?to, + ?context, + "did not transfer asset", ); Err(XcmError::AssetNotFound) } diff --git a/polkadot/xcm/xcm-executor/src/traits/weight.rs b/polkadot/xcm/xcm-executor/src/traits/weight.rs index efb9a2dfb6efdf65a074f631c691c0bfec88d600..985dfcd0cf2d78cfdc773299c1432e115b6323f2 100644 --- a/polkadot/xcm/xcm-executor/src/traits/weight.rs +++ b/polkadot/xcm/xcm-executor/src/traits/weight.rs @@ -80,18 +80,38 @@ impl WeightTrader for Tuple { let mut too_expensive_error_found = false; let mut last_error = None; for_tuples!( #( + let weight_trader = core::any::type_name::(); + match Tuple.buy_weight(weight, payment.clone(), context) { - Ok(assets) => return Ok(assets), - Err(e) => { - if let XcmError::TooExpensive = e { + Ok(assets) => { + tracing::trace!( + target: "xcm::buy_weight", + %weight_trader, + "Buy weight succeeded", + ); + + return Ok(assets) + }, + Err(error) => { + if let XcmError::TooExpensive = error { too_expensive_error_found = true; } - last_error = Some(e) + last_error = Some(error); + + tracing::trace!( + target: "xcm::buy_weight", + ?error, + %weight_trader, + "Weight trader failed", + ); } } )* ); - log::trace!(target: "xcm::buy_weight", "last_error: {:?}, too_expensive_error_found: {}", last_error, too_expensive_error_found); + tracing::trace!( + target: "xcm::buy_weight", + "Buy weight failed", + ); // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. 
// `AssetNotFound` then it is more accurate to return `TooExpensive` then `AssetNotFound` diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml deleted file mode 100644 index 6fa0236dfb41d5e2a4ec2749a402a8f73be60eb4..0000000000000000000000000000000000000000 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "xcm-fee-payment-runtime-api" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -repository.workspace = true -description = "XCM fee payment runtime API" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ - "derive", -] } - -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = [ - "derive", - "serde", -] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -xcm = { package = "staging-xcm", path = "../", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -[dev-dependencies] -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-xcm = { path = "../pallet-xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -log = { workspace = true } -env_logger = "0.9.0" - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-executive/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-assets/std", - "pallet-balances/std", - "pallet-xcm/std", - "scale-info/std", - "sp-api/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "sp-weights/std", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7d3f1a20b639a719db1fbc023dcc688eff6f2a52 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "xcm-runtime-apis" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +repository.workspace = true +description = "XCM runtime APIs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["derive"], workspace = 
true } +scale-info = { features = ["derive", "serde"], workspace = true } + +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-weights = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } + +[dev-dependencies] +frame-system = { workspace = true } +sp-io = { workspace = true } +xcm-builder = { workspace = true } +hex-literal = { workspace = true } +pallet-xcm = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +xcm-executor = { workspace = true } +frame-executive = { workspace = true } +log = { workspace = true } +env_logger = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-balances/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-io/std", + "sp-std/std", + "sp-weights/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/polkadot/xcm/xcm-runtime-apis/src/conversions.rs b/polkadot/xcm/xcm-runtime-apis/src/conversions.rs new file mode 100644 index 0000000000000000000000000000000000000000..d422664557e0eb94729a6876fc9001cf786d4b72 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/src/conversions.rs @@ -0,0 +1,56 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Contains runtime APIs for useful conversions, such as between XCM `Location` and `AccountId`. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use xcm::VersionedLocation; +use xcm_executor::traits::ConvertLocation; + +sp_api::decl_runtime_apis! { + /// API for useful conversions between XCM `Location` and `AccountId`. + pub trait LocationToAccountApi where AccountId: Decode { + /// Converts `Location` to `AccountId`. + fn convert_location(location: VersionedLocation) -> Result; + } +} + +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Requested `Location` is not supported by the local conversion. + #[codec(index = 0)] + Unsupported, + + /// Converting a versioned data structure from one version to another failed. + #[codec(index = 1)] + VersionedConversionFailed, +} + +/// A helper implementation that can be used for `LocationToAccountApi` implementations. +/// It is useful when you already have a `ConvertLocation` implementation and a default +/// `Ss58Prefix`. 
+pub struct LocationToAccountHelper( + sp_std::marker::PhantomData<(AccountId, Conversion)>, +); +impl> + LocationToAccountHelper +{ + pub fn convert_location(location: VersionedLocation) -> Result { + let location = location.try_into().map_err(|_| Error::VersionedConversionFailed)?; + Conversion::convert_location(&location).ok_or(Error::Unsupported) + } +} diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs similarity index 100% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs rename to polkadot/xcm/xcm-runtime-apis/src/dry_run.rs diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/fees.rs b/polkadot/xcm/xcm-runtime-apis/src/fees.rs similarity index 100% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/fees.rs rename to polkadot/xcm/xcm-runtime-apis/src/fees.rs diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-runtime-apis/src/lib.rs similarity index 74% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs rename to polkadot/xcm/xcm-runtime-apis/src/lib.rs index 616ee4c2eccb0aeed1cdb5e2aa524855d8b2df82..7b3b86214b3a14d3886b8dec2928d564358e370f 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/lib.rs @@ -14,19 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Runtime APIs for estimating xcm fee payment. -//! This crate offers two APIs, one for estimating fees, -//! which can be used for any type of message, and another one -//! for returning the specific messages used for transfers, a common -//! feature. -//! Users of these APIs should call the transfers API and pass the result to the -//! fees API. +//! Various runtime APIs to support XCM processing and manipulation. #![cfg_attr(not(feature = "std"), no_std)] +/// Exposes runtime APIs for various XCM-related conversions. +pub mod conversions; + /// Dry-run API. /// Given an extrinsic or an XCM program, it returns the outcome of its execution. pub mod dry_run; + /// Fee estimation API. /// Given an XCM program, it will return the fees needed to execute it properly or send it. pub mod fees; diff --git a/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs b/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs new file mode 100644 index 0000000000000000000000000000000000000000..7f0f0923b092116e0ad10f4b287dda8c93f5357f --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs @@ -0,0 +1,83 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +mod mock; + +use frame_support::{ + assert_err, assert_ok, + sp_runtime::{ + testing::H256, + traits::{IdentifyAccount, Verify}, + AccountId32, MultiSignature, + }, +}; +use mock::*; +use sp_api::ProvideRuntimeApi; +use xcm::prelude::*; +use xcm_runtime_apis::conversions::{ + Error as LocationToAccountApiError, LocationToAccountApi, LocationToAccountHelper, +}; + +#[test] +fn convert_location_to_account_works() { + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + // Test unknown conversion for `Here` location + assert_err!( + runtime_api + .convert_location(H256::zero(), VersionedLocation::from(Location::here())) + .unwrap(), + LocationToAccountApiError::Unsupported + ); + + // Test known conversion for sibling parachain location + assert_ok!( + runtime_api + .convert_location(H256::zero(), VersionedLocation::from((Parent, Parachain(1000)))) + .unwrap(), + 1000_u64 + ); + }) +} + +#[test] +fn location_to_account_helper_with_multi_signature_works() { + type Signature = MultiSignature; + type AccountIdForConversions = <::Signer as IdentifyAccount>::AccountId; + // We alias only `Location::parent()` + pub type LocationToAccountIdForConversions = + (xcm_builder::ParentIsPreset,); + + // Test unknown conversion for `Here` location + assert_err!( + LocationToAccountHelper::< + AccountIdForConversions, + LocationToAccountIdForConversions, + >::convert_location(Location::here().into_versioned()), + LocationToAccountApiError::Unsupported + ); + + // Test known conversion for `Parent` location + assert_ok!( + LocationToAccountHelper::< + AccountIdForConversions, + LocationToAccountIdForConversions, + >::convert_location(Location::parent().into_versioned()), + AccountId32::from(hex_literal::hex!("506172656e740000000000000000000000000000000000000000000000000000")) + ); +} diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs similarity index 99% rename from polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs rename to polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 33611c8a471c03a34c125a921b24a51880dfbb3f..59ee17973805621ce360546afb0b1583abfbdc9a 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -16,11 +16,11 @@ //! Tests for using both the XCM fee payment API and the dry-run API. 
+use frame_support::sp_runtime::testing::H256; use frame_system::RawOrigin; use sp_api::ProvideRuntimeApi; -use sp_runtime::testing::H256; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{dry_run::DryRunApi, fees::XcmPaymentApi}; +use xcm_runtime_apis::{dry_run::DryRunApi, fees::XcmPaymentApi}; mod mock; use mock::{ diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs similarity index 96% rename from polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs rename to polkadot/xcm/xcm-runtime-apis/tests/mock.rs index aa6c1422b608c955fc74934811f4f136212507cb..e723e254635659de94c489363260da84e27b440e 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -19,7 +19,11 @@ use codec::Encode; use frame_support::{ - construct_runtime, derive_impl, parameter_types, + construct_runtime, derive_impl, parameter_types, sp_runtime, + sp_runtime::{ + traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, + BuildStorage, SaturatedConversion, + }, traits::{ AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, ContainsPair, Everything, Nothing, OriginTrait, @@ -28,10 +32,6 @@ use frame_support::{ }; use frame_system::{EnsureRoot, RawOrigin as SystemRawOrigin}; use pallet_xcm::TestWeightInfo; -use sp_runtime::{ - traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, - BuildStorage, SaturatedConversion, -}; use sp_std::{cell::RefCell, marker::PhantomData}; use xcm::{prelude::*, Version as XcmVersion}; use xcm_builder::{ @@ -44,7 +44,8 @@ use xcm_executor::{ XcmExecutor, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ + conversions::{Error as LocationToAccountApiError, LocationToAccountApi}, dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, }; @@ -352,6 +353,7 @@ impl pallet_xcm::Config for TestRuntime { type WeightInfo = TestWeightInfo; } +#[allow(dead_code)] pub fn new_test_ext_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); @@ -364,6 +366,7 @@ pub fn new_test_ext_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io: ext } +#[allow(dead_code)] pub fn new_test_ext_with_balances_and_assets( balances: Vec<(AccountId, Balance)>, assets: Vec<(AssetIdForAssetsPallet, AccountId, Balance)>, @@ -386,6 +389,7 @@ pub fn new_test_ext_with_balances_and_assets( (1, "Relay Token".into(), "RLY".into(), 12), ], accounts: assets, + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); @@ -410,6 +414,14 @@ impl sp_api::ProvideRuntimeApi for TestClient { } sp_api::mock_impl_runtime_apis! 
{ + impl LocationToAccountApi for RuntimeApi { + fn convert_location(location: VersionedLocation) -> Result { + let location = location.try_into().map_err(|_| LocationToAccountApiError::VersionedConversionFailed)?; + LocationToAccountId::convert_location(&location) + .ok_or(LocationToAccountApiError::Unsupported) + } + } + impl XcmPaymentApi for RuntimeApi { fn query_acceptable_payment_assets(xcm_version: XcmVersion) -> Result, XcmPaymentApiError> { Ok(vec![ diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index fc09b5e31861c0cb6470e7d7c121e8c94e1f60f8..c7caa49393ed5c7fa505b38d1a2eaac208d63465 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -10,20 +10,20 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.6.0", default-features = false } -paste = "1.0.7" +codec = { workspace = true, default-features = true } +scale-info = { workspace = true } +paste = { workspace = true, default-features = true } -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } -polkadot-primitives = { path = "../../primitives" } -polkadot-core-primitives = { path = "../../core-primitives" } -polkadot-parachain-primitives = { path = "../../parachain" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index 8b04170e3032f90adb6f4c3c5d3954729e009b20..e0aff9b7782a7e4fa8c73dc5cfaae51af2035533 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -10,29 +10,29 @@ version = "7.0.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -frame-system = { path = "../../../../substrate/frame/system" } -frame-support = { path = "../../../../substrate/frame/support" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-message-queue = { path = 
"../../../../substrate/frame/message-queue" } -pallet-uniques = { path = "../../../../substrate/frame/uniques" } -sp-std = { path = "../../../../substrate/primitives/std" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = "../.." } -xcm-simulator = { path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm-builder" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../../parachain" } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 6b3b4018d9fbb30862eee8fa5ea1408c96b2f68e..04f8ba115173465b14127cb277bcc74776ea1a5b 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,30 +11,30 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -honggfuzz = "0.5.55" -arbitrary = "1.3.2" -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { workspace = true, default-features = true } +honggfuzz = { workspace = true } +arbitrary = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -frame-system = { path = "../../../../substrate/frame/system" } -frame-support = { path = "../../../../substrate/frame/support" } -frame-executive = { path = "../../../../substrate/frame/executive" } -frame-try-runtime = { path = "../../../../substrate/frame/try-runtime" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue" } -sp-std = { path = "../../../../substrate/primitives/std" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-io = { path = "../../../../substrate/primitives/io" } +frame-system = { workspace = true, default-features = 
true } +frame-support = { workspace = true, default-features = true } +frame-executive = { workspace = true, default-features = true } +frame-try-runtime = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = "../.." } -xcm-simulator = { path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm-builder" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../../parachain" } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 502bcca2d44270263a45eeaf305b924d8b37c509..11435868d4682ae4229b7939f2a8685316c7e296 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -24,7 +24,6 @@ use frame_support::{ }; use frame_system::EnsureRoot; -use sp_core::ConstU32; use sp_runtime::{ generic, traits::{AccountIdLookup, BlakeTwo256, Hash, IdentifyAccount, Verify}, @@ -73,24 +72,13 @@ impl frame_system::Config for Runtime { parameter_types! { pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! { diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 4740aee83d870a0e260512ac15a982c515a1e803..459d2640b6d908b14e0964aa2e2a697f95c7dbd3 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -72,24 +72,13 @@ impl frame_system::Config for Runtime { parameter_types! 
{ pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/polkadot/zombienet_tests/assign-core.js b/polkadot/zombienet_tests/assign-core.js new file mode 100644 index 0000000000000000000000000000000000000000..5ddb86930f5a0a04380e1e4cbef5c96f81db4fe0 --- /dev/null +++ b/polkadot/zombienet_tests/assign-core.js @@ -0,0 +1,48 @@ +async function run(nodeName, networkInfo, args) { + const wsUri = networkInfo.nodesByName[nodeName].wsUri; + const api = await zombie.connect(wsUri); + + let core = Number(args[0]); + + let assignments = []; + + for (let i = 1; i < args.length; i += 2) { + let [para, parts] = [args[i], args[i + 1]]; + + console.log(`Assigning para ${para} to core ${core}`); + + assignments.push( + [{ task: para }, parts] + ); + } + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(core, 0, assignments, null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml index 83f5434edddb19afefcba93a9ce7bb305909c07f..611978a33a5f145274dd3c6c158e0de69a1c436a 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml @@ -7,11 +7,9 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] max_validators_per_core = 1 - scheduling_lookahead = 2 num_cores = 3 [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] - needed_approvals = 3 max_approval_coalesce_count = 5 [relaychain] @@ -48,4 +46,4 @@ addToGenesis = true [types.Header] number = "u64" parent_hash = "Hash" -post_state = "Hash" \ No newline at end of file +post_state = "Hash" diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl index d624cbaf9df6a62448db2cef637e6d29a0d419b5..d47ef8f415f7ac9ca94b825de23580ab6131f013 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl @@ -11,8 +11,8 @@ elastic-validator-4: 
reports node_roles is 4 # Register 2 extra cores to this some-parachain. -elastic-validator-0: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -elastic-validator-0: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds # Wait for 20 relay chain blocks elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 900a3befbc6fc56f5200e9399f5fe5b21eb7af40..7ba896e1c90397642ff0736477f9c793bdb746ec 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -11,8 +11,8 @@ validator: reports substrate_block_height{status="finalized"} is at least 10 wit validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. -alice: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js deleted file mode 100644 index add63b6d30859d2c50a38a73e33ec75ed3669433..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js +++ /dev/null @@ -1,39 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const wsUri = networkInfo.nodesByName[nodeName].wsUri; - const api = await zombie.connect(wsUri); - - let para = Number(args[0]); - let core = Number(args[1]); - console.log(`Assigning para ${para} to core ${core}`); - - await zombie.util.cryptoWaitReady(); - - // account to submit tx - const keyring = new zombie.Keyring({ type: "sr25519" }); - const alice = keyring.addFromUri("//Alice"); - - await new Promise(async (resolve, reject) => { - const unsub = await api.tx.sudo - .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, 57600]], null)) - .signAndSend(alice, ({ status, isError }) => { - if (status.isInBlock) { - console.log( - `Transaction included at blockhash ${status.asInBlock}`, - ); - } else if (status.isFinalized) { - console.log( - `Transaction finalized at blockHash ${status.asFinalized}`, - ); - unsub(); - return resolve(); - } else if (isError) { - console.log(`Transaction error`); - reject(`Transaction error`); - } - }); - }); - - return 0; -} - -module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js new file mode 120000 index 0000000000000000000000000000000000000000..eeb6402c06f5e52cedf150f924d6791beb1d9867 --- /dev/null +++ b/polkadot/zombienet_tests/elastic_scaling/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git 
a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml new file mode 100644 index 0000000000000000000000000000000000000000..fed30e0db05321631fdce66da858e1431ded64dd --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml @@ -0,0 +1,44 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 3 + allowed_ancestry_len = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + lookahead = 2 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=debug,parachain::backing=trace,parachain::collator-protocol=trace,parachain::prospective-parachains=trace,runtime::parachains::scheduler=trace,runtime::inclusion-inherent=trace,runtime::inclusion=trace" ] + count = 4 + +{% for id in range(2000,2004) %} +[[parachains]] +id = {{id}} +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-{{id}}" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..b8b8887df85782268735f906eab771ddfcc22fcf --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl @@ -0,0 +1,16 @@ +Description: CT shared core test +Network: ./0015-coretime-shared-core.toml +Creds: config + +validator: reports node_roles is 4 + +# register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit. +validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds +validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds +# assign core 0 to be shared by all paras. 
+validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds + +collator-2000: reports block height is at least 6 within 200 seconds +collator-2001: reports block height is at least 6 within 50 seconds +collator-2002: reports block height is at least 6 within 50 seconds +collator-2003: reports block height is at least 6 within 50 seconds diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/0015-force-register-paras.js new file mode 100644 index 0000000000000000000000000000000000000000..f82163b01105a4b7bee09d6827860b685c38890e --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-force-register-paras.js @@ -0,0 +1,63 @@ +async function run(nodeName, networkInfo, args) { + const init = networkInfo.nodesByName[nodeName]; + let wsUri = init.wsUri; + let userDefinedTypes = init.userDefinedTypes; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + let calls = []; + + for (let i = 0; i < args.length; i++) { + let para = args[i]; + const sec = networkInfo.nodesByName["collator-" + para]; + const api_collator = await zombie.connect(sec.wsUri, sec.userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // Get the genesis header and the validation code of the parachain + const genesis_header = await api_collator.rpc.chain.getHeader(); + const validation_code = await api_collator.rpc.state.getStorage("0x3A636F6465"); + + calls.push( + api.tx.paras.addTrustedValidationCode(validation_code.toHex()) + ); + calls.push( + api.tx.registrar.forceRegister( + alice.address, + 0, + Number(para), + genesis_header.toHex(), + validation_code.toHex(), + ) + ); + } + + const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + await new Promise(async (resolve, reject) => { + const unsub = await sudo_batch + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/assign-core.js b/polkadot/zombienet_tests/functional/assign-core.js new file mode 120000 index 0000000000000000000000000000000000000000..eeb6402c06f5e52cedf150f924d6791beb1d9867 --- /dev/null +++ b/polkadot/zombienet_tests/functional/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git a/prdoc/1.13.0/pr_1223.prdoc b/prdoc/1.13.0/pr_1223.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..08b18557b70c6b649c49bb1dd62acc9528a79a86 --- /dev/null +++ b/prdoc/1.13.0/pr_1223.prdoc @@ -0,0 +1,13 @@ +title: Optimize storage append operation + +doc: + - audience: [Node Dev, Runtime Dev] + description: | + This pull request optimizes the storage append operation in the `OverlayedChanges`. + Before the internal buffer was cloned every time a new transaction was created. Cloning + the internal buffer is now only done when there is no other possibility. This should + improve the performance in situations like when depositing events from batched calls. 
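The optimisation described in pr_1223 is easiest to see with a toy copy-on-write buffer. The sketch below is illustrative only and assumes nothing about the real `OverlayedChanges` internals; `AppendBuffer` and its methods are made-up names standing in for the "clone only when unavoidable" idea.

```rust
use std::rc::Rc;

/// Illustrative copy-on-write append buffer: the data is only cloned when a
/// snapshot taken for an open transaction still references it.
struct AppendBuffer {
    data: Rc<Vec<u8>>,
}

impl AppendBuffer {
    fn new() -> Self {
        Self { data: Rc::new(Vec::new()) }
    }

    /// Start a "transaction" by handing out a cheap snapshot (no clone yet).
    fn snapshot(&self) -> Rc<Vec<u8>> {
        Rc::clone(&self.data)
    }

    /// Append bytes; `Rc::make_mut` clones the underlying buffer only if a
    /// snapshot is still alive, otherwise it appends in place.
    fn append(&mut self, bytes: &[u8]) {
        Rc::make_mut(&mut self.data).extend_from_slice(bytes);
    }
}

fn main() {
    let mut buf = AppendBuffer::new();
    buf.append(b"event-1"); // no snapshot alive: appends in place
    let snap = buf.snapshot(); // open transaction keeps the old state
    buf.append(b"event-2"); // clone happens here, exactly once
    drop(snap);
    buf.append(b"event-3"); // in place again
    assert_eq!(*buf.data, b"event-1event-2event-3".to_vec());
}
```

In the "before" picture the clone would happen on every transaction start; deferring it until an append actually collides with a live snapshot is what makes repeated event deposits from batched calls cheaper.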
+ +crates: + - name: sp-state-machine + bump: major diff --git a/prdoc/pr_1644.prdoc b/prdoc/1.13.0/pr_1644.prdoc similarity index 100% rename from prdoc/pr_1644.prdoc rename to prdoc/1.13.0/pr_1644.prdoc diff --git a/prdoc/pr_3393.prdoc b/prdoc/1.13.0/pr_3393.prdoc similarity index 100% rename from prdoc/pr_3393.prdoc rename to prdoc/1.13.0/pr_3393.prdoc diff --git a/prdoc/pr_3905.prdoc b/prdoc/1.13.0/pr_3905.prdoc similarity index 100% rename from prdoc/pr_3905.prdoc rename to prdoc/1.13.0/pr_3905.prdoc diff --git a/prdoc/pr_3935.prdoc b/prdoc/1.13.0/pr_3935.prdoc similarity index 100% rename from prdoc/pr_3935.prdoc rename to prdoc/1.13.0/pr_3935.prdoc diff --git a/prdoc/pr_3952.prdoc b/prdoc/1.13.0/pr_3952.prdoc similarity index 100% rename from prdoc/pr_3952.prdoc rename to prdoc/1.13.0/pr_3952.prdoc diff --git a/prdoc/pr_4131.prdoc b/prdoc/1.13.0/pr_4131.prdoc similarity index 100% rename from prdoc/pr_4131.prdoc rename to prdoc/1.13.0/pr_4131.prdoc diff --git a/prdoc/pr_4198.prdoc b/prdoc/1.13.0/pr_4198.prdoc similarity index 100% rename from prdoc/pr_4198.prdoc rename to prdoc/1.13.0/pr_4198.prdoc diff --git a/prdoc/pr_4233.prdoc b/prdoc/1.13.0/pr_4233.prdoc similarity index 100% rename from prdoc/pr_4233.prdoc rename to prdoc/1.13.0/pr_4233.prdoc diff --git a/prdoc/pr_4249.prdoc b/prdoc/1.13.0/pr_4249.prdoc similarity index 100% rename from prdoc/pr_4249.prdoc rename to prdoc/1.13.0/pr_4249.prdoc diff --git a/prdoc/pr_4274.prdoc b/prdoc/1.13.0/pr_4274.prdoc similarity index 100% rename from prdoc/pr_4274.prdoc rename to prdoc/1.13.0/pr_4274.prdoc diff --git a/prdoc/pr_4339.prdoc b/prdoc/1.13.0/pr_4339.prdoc similarity index 100% rename from prdoc/pr_4339.prdoc rename to prdoc/1.13.0/pr_4339.prdoc diff --git a/prdoc/pr_4380.prdoc b/prdoc/1.13.0/pr_4380.prdoc similarity index 100% rename from prdoc/pr_4380.prdoc rename to prdoc/1.13.0/pr_4380.prdoc diff --git a/prdoc/pr_4392.prdoc b/prdoc/1.13.0/pr_4392.prdoc similarity index 100% rename from prdoc/pr_4392.prdoc rename to prdoc/1.13.0/pr_4392.prdoc diff --git a/prdoc/pr_4410.prdoc b/prdoc/1.13.0/pr_4410.prdoc similarity index 100% rename from prdoc/pr_4410.prdoc rename to prdoc/1.13.0/pr_4410.prdoc diff --git a/prdoc/pr_4418.prdoc b/prdoc/1.13.0/pr_4418.prdoc similarity index 100% rename from prdoc/pr_4418.prdoc rename to prdoc/1.13.0/pr_4418.prdoc diff --git a/prdoc/pr_4431.prdoc b/prdoc/1.13.0/pr_4431.prdoc similarity index 100% rename from prdoc/pr_4431.prdoc rename to prdoc/1.13.0/pr_4431.prdoc diff --git a/prdoc/pr_4444.prdoc b/prdoc/1.13.0/pr_4444.prdoc similarity index 100% rename from prdoc/pr_4444.prdoc rename to prdoc/1.13.0/pr_4444.prdoc diff --git a/prdoc/pr_4465.prdoc b/prdoc/1.13.0/pr_4465.prdoc similarity index 100% rename from prdoc/pr_4465.prdoc rename to prdoc/1.13.0/pr_4465.prdoc diff --git a/prdoc/pr_4471.prdoc b/prdoc/1.13.0/pr_4471.prdoc similarity index 100% rename from prdoc/pr_4471.prdoc rename to prdoc/1.13.0/pr_4471.prdoc diff --git a/prdoc/pr_4472.prdoc b/prdoc/1.13.0/pr_4472.prdoc similarity index 100% rename from prdoc/pr_4472.prdoc rename to prdoc/1.13.0/pr_4472.prdoc diff --git a/prdoc/pr_4475.prdoc b/prdoc/1.13.0/pr_4475.prdoc similarity index 100% rename from prdoc/pr_4475.prdoc rename to prdoc/1.13.0/pr_4475.prdoc diff --git a/prdoc/pr_4478.prdoc b/prdoc/1.13.0/pr_4478.prdoc similarity index 100% rename from prdoc/pr_4478.prdoc rename to prdoc/1.13.0/pr_4478.prdoc diff --git a/prdoc/pr_4503.prdoc b/prdoc/1.13.0/pr_4503.prdoc similarity index 100% rename from prdoc/pr_4503.prdoc rename to 
prdoc/1.13.0/pr_4503.prdoc diff --git a/prdoc/pr_4510.prdoc b/prdoc/1.13.0/pr_4510.prdoc similarity index 100% rename from prdoc/pr_4510.prdoc rename to prdoc/1.13.0/pr_4510.prdoc diff --git a/prdoc/pr_4514.prdoc b/prdoc/1.13.0/pr_4514.prdoc similarity index 100% rename from prdoc/pr_4514.prdoc rename to prdoc/1.13.0/pr_4514.prdoc diff --git a/prdoc/pr_4521.prdoc b/prdoc/1.13.0/pr_4521.prdoc similarity index 100% rename from prdoc/pr_4521.prdoc rename to prdoc/1.13.0/pr_4521.prdoc diff --git a/prdoc/pr_4533.prdoc b/prdoc/1.13.0/pr_4533.prdoc similarity index 100% rename from prdoc/pr_4533.prdoc rename to prdoc/1.13.0/pr_4533.prdoc diff --git a/prdoc/pr_4534.prdoc b/prdoc/1.13.0/pr_4534.prdoc similarity index 100% rename from prdoc/pr_4534.prdoc rename to prdoc/1.13.0/pr_4534.prdoc diff --git a/prdoc/pr_4537.prdoc b/prdoc/1.13.0/pr_4537.prdoc similarity index 100% rename from prdoc/pr_4537.prdoc rename to prdoc/1.13.0/pr_4537.prdoc diff --git a/prdoc/pr_4541.prdoc b/prdoc/1.13.0/pr_4541.prdoc similarity index 100% rename from prdoc/pr_4541.prdoc rename to prdoc/1.13.0/pr_4541.prdoc diff --git a/prdoc/pr_4542.prdoc b/prdoc/1.13.0/pr_4542.prdoc similarity index 100% rename from prdoc/pr_4542.prdoc rename to prdoc/1.13.0/pr_4542.prdoc diff --git a/prdoc/pr_4555.prdoc b/prdoc/1.13.0/pr_4555.prdoc similarity index 100% rename from prdoc/pr_4555.prdoc rename to prdoc/1.13.0/pr_4555.prdoc diff --git a/prdoc/pr_4571.prdoc b/prdoc/1.13.0/pr_4571.prdoc similarity index 100% rename from prdoc/pr_4571.prdoc rename to prdoc/1.13.0/pr_4571.prdoc diff --git a/prdoc/pr_4595.prdoc b/prdoc/1.13.0/pr_4595.prdoc similarity index 100% rename from prdoc/pr_4595.prdoc rename to prdoc/1.13.0/pr_4595.prdoc diff --git a/prdoc/pr_4621.prdoc b/prdoc/1.13.0/pr_4621.prdoc similarity index 100% rename from prdoc/pr_4621.prdoc rename to prdoc/1.13.0/pr_4621.prdoc diff --git a/prdoc/pr_4633.prdoc b/prdoc/1.13.0/pr_4633.prdoc similarity index 100% rename from prdoc/pr_4633.prdoc rename to prdoc/1.13.0/pr_4633.prdoc diff --git a/prdoc/pr_4634.prdoc b/prdoc/1.13.0/pr_4634.prdoc similarity index 100% rename from prdoc/pr_4634.prdoc rename to prdoc/1.13.0/pr_4634.prdoc diff --git a/prdoc/pr_4645.prdoc b/prdoc/1.13.0/pr_4645.prdoc similarity index 100% rename from prdoc/pr_4645.prdoc rename to prdoc/1.13.0/pr_4645.prdoc diff --git a/prdoc/pr_4646.prdoc b/prdoc/1.13.0/pr_4646.prdoc similarity index 100% rename from prdoc/pr_4646.prdoc rename to prdoc/1.13.0/pr_4646.prdoc diff --git a/prdoc/1.13.0/pr_4721.prdoc b/prdoc/1.13.0/pr_4721.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..730ac4d8308692001c20f30a89276af26a1b14ce --- /dev/null +++ b/prdoc/1.13.0/pr_4721.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Skip tree route calculation if no forks present + +doc: + - audience: Node Operator + description: | + Fixes an issue with synchronisation on parachains. Once they reached the tip of the chain, + nodes would show `Preparing 0.0 bps`. This is shown because the node is blocked on calculating + the tree route from genesis to the tip of the chain many times. This PR solves that by skipping + tree route calculation if there is only one leave. In addition, further optimizations have been + done to alleviate long finalization distances. 
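A minimal sketch of the fast path described in pr_4721, assuming hypothetical `Hash` and `is_descendant_of` helpers rather than the real `sp-blockchain` API: with a single leaf there are no forks, so no tree route from genesis needs to be walked.

```rust
/// Hypothetical block hash type used for illustration only.
type Hash = [u8; 32];

/// Sketch of the idea: when there is exactly one leaf there are no forks, so
/// the expensive tree-route computation can be skipped entirely.
/// `is_descendant_of(ancestor, block)` returns whether `block` descends from `ancestor`.
fn displaced_leaves(
    leaves: &[Hash],
    finalized: Hash,
    is_descendant_of: impl Fn(Hash, Hash) -> bool,
) -> Vec<Hash> {
    if let [single] = leaves {
        // Fast path: a single leaf that descends from the finalized block
        // means nothing is displaced; no tree route needs to be computed.
        if is_descendant_of(finalized, *single) {
            return Vec::new();
        }
    }
    // Slow path (forks present): the real code walks tree routes here; this
    // placeholder only keeps leaves that do not descend from the finalized block.
    leaves
        .iter()
        .copied()
        .filter(|leaf| !is_descendant_of(finalized, *leaf))
        .collect()
}

fn main() {
    let finalized = [1u8; 32];
    let tip = [2u8; 32];
    // With one leaf on the canonical chain, the fast path returns immediately.
    let displaced = displaced_leaves(&[tip], finalized, |_ancestor, _block| true);
    assert!(displaced.is_empty());
}
```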
+ +crates: + - name: sp-blockchain + bump: minor + - name: sc-client-db + bump: none diff --git a/prdoc/1.14.0/pr_1631.prdoc b/prdoc/1.14.0/pr_1631.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f73d00968552a4b0c589a68079971d0dcac503bc --- /dev/null +++ b/prdoc/1.14.0/pr_1631.prdoc @@ -0,0 +1,39 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Upgrade libp2p to 0.52.4 + +doc: + - audience: [Node Dev, Node Operator] + description: | + Upgrade libp2p from 0.51.4 to 0.52.4 + +crates: + - name: sc-authority-discovery + bump: minor + - name: sc-cli + bump: minor + - name: sc-mixnet + bump: minor + - name: sc-network + bump: minor + - name: sc-network-gossip + bump: minor + - name: sc-network-common + bump: minor + - name: sc-network-light + bump: minor + - name: sc-network-statement + bump: minor + - name: sc-network-sync + bump: minor + - name: sc-network-test + bump: minor + - name: sc-network-transactions + bump: minor + - name: sc-network-types + bump: minor + - name: sc-offchain + bump: major + - name: sc-telemetry + bump: major diff --git a/prdoc/1.14.0/pr_3374.prdoc b/prdoc/1.14.0/pr_3374.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..76744f778db0a5bf5ba259ec2978d4b7e8d4e9b6 --- /dev/null +++ b/prdoc/1.14.0/pr_3374.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: removed `pallet::getter` from `pallet-timestamp` + +doc: + - audience: Runtime Dev + description: | + This PR removes all the `pallet::getter` usages from `pallet-timestamp`, and updates depdendant runtimes accordingly. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-timestamp \ No newline at end of file diff --git a/prdoc/pr_3679.prdoc b/prdoc/1.14.0/pr_3679.prdoc similarity index 100% rename from prdoc/pr_3679.prdoc rename to prdoc/1.14.0/pr_3679.prdoc diff --git a/prdoc/1.14.0/pr_3820.prdoc b/prdoc/1.14.0/pr_3820.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..33e8129df92a3c045f88531f40ee2d1793e591ab --- /dev/null +++ b/prdoc/1.14.0/pr_3820.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated calls from treasury pallet + +doc: + - audience: Runtime User + description: | + This PR remove deprecated calls, relevant tests from `pallet-treasury`. + - Remove deprecated calls `propose_spend`, `reject_proposal`, `approve_proposal`. + - Replace the code flow of `propose_spend` then `approve_proposal` with `spend_local` + - Remove deprecated calls' related weight functions and test cases. 
+ - Remove deprecated parameter types: ProposalBond, ProposalBondMaximum, ProposalBondMinimum + - Remove pallet treasury's relevant deprecated code in pallet-tips, pallet-bounties and pallet-child-bounties + +crates: + - name: pallet-treasury + bump: major + - name: pallet-tips + bump: patch + - name: pallet-child-bounties + bump: patch + - name: pallet-bounties + bump: patch + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_3828.prdoc b/prdoc/1.14.0/pr_3828.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..426625d5f23effb3fe889cacb767650d56921044 --- /dev/null +++ b/prdoc/1.14.0/pr_3828.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[FRAME] Remove storage migration type" + +doc: + - audience: Runtime Dev + description: | + Introduce migration type to remove data associated with a specific storage of a pallet. + +crates: + - name: frame-support + bump: minor diff --git a/prdoc/1.14.0/pr_3843.prdoc b/prdoc/1.14.0/pr_3843.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e01900dcc25b998124a2929f14171d049c2fa698 --- /dev/null +++ b/prdoc/1.14.0/pr_3843.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce a new dispatchable function `set_partial_params` in `pallet-core-fellowship` + +doc: + - audience: Runtime Dev + description: | + This PR adds a new dispatchable function `set_partial_params` + to update config with multiple arguments without duplicating the + fields that does not need to update. + +crates: + - name: pallet-core-fellowship + bump: major + - name: collectives-westend-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_3940.prdoc b/prdoc/1.14.0/pr_3940.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..590afa77bb1ed8ec41958d87279e7be969f55ade --- /dev/null +++ b/prdoc/1.14.0/pr_3940.prdoc @@ -0,0 +1,31 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "RFC-5: Add request revenue info" + +doc: + - audience: Runtime Dev + description: | + Partially implemented RFC-5 in terms of revenue requests and notifications + - audience: Runtime User + description: | + Instantaneous Coretime sold on the relay chain now generates revenue for its provider. + The revenue may be claimed by its provider on the Coretime chain. 
+ +crates: + - name: polkadot-runtime-parachains + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: pallet-broker + bump: minor + - name: rococo-runtime-constants + bump: minor + - name: westend-runtime-constants + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor diff --git a/prdoc/1.14.0/pr_3951.prdoc b/prdoc/1.14.0/pr_3951.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3a8096e6f448748e090cf0732a7b453fafcb6239 --- /dev/null +++ b/prdoc/1.14.0/pr_3951.prdoc @@ -0,0 +1,30 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Pallet Assets Freezer + +doc: + - audience: Runtime Dev + description: | + This pallet is an extension of `pallet-assets`, supporting + freezes similar to `pallet-balances`. + To use this pallet, set `Freezer` of `pallet-assets` Config to the according instance of + `pallet-assets-freezer`. + - audience: Runtime User + description: | + The storage of this pallet contains a Vecs of account freezes. Applications UIs and Developer + Tools might benefit from observing it. + +crates: + - name: frame-support + bump: minor + - name: pallet-assets-freezer + bump: major + - name: pallet-assets + bump: patch + - name: pallet-balances + bump: patch + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/1.14.0/pr_4513.prdoc b/prdoc/1.14.0/pr_4513.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e7363d211c1700a9ec3a4b38799ad73da0b13038 --- /dev/null +++ b/prdoc/1.14.0/pr_4513.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-elections-phragmen + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-elections-phragmen`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-elections-phragmen + bump: major diff --git a/prdoc/1.14.0/pr_4596.prdoc b/prdoc/1.14.0/pr_4596.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d47aa3aedfb85abcae3bd76f0d0abe31d544816b --- /dev/null +++ b/prdoc/1.14.0/pr_4596.prdoc @@ -0,0 +1,18 @@ +title: "Frame: `Consideration` trait generic over `Footprint` and handles zero cost" + +doc: + - audience: Runtime Dev + description: | + `Consideration` trait generic over `Footprint` and can handle zero cost for a give footprint. + + `Consideration` trait is generic over `Footprint` (currently defined over the type with the same name). This makes it possible to setup a custom footprint (e.g. current number of proposals in the storage). + + `Consideration::new` and `Consideration::update` return an `Option` instead `Self`, this make it possible to define no cost for a specific footprint (e.g. current number of proposals in the storage < max_proposal_count / 2). 
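A simplified stand-in for the behaviour described in pr_4596 above; the trait below is not the actual `frame-support` `Consideration` definition (the names, signatures and the proposal-count footprint are assumptions for illustration). It only shows how returning `Option<Self>` lets an implementation charge nothing for small footprints.

```rust
/// Simplified stand-in for a footprint: here just a count of stored proposals.
#[derive(Clone, Copy)]
struct ProposalCount(u32);

/// Simplified stand-in for the idea described above: the trait is generic over
/// the footprint type and may decide that a given footprint is free.
trait Consideration<F>: Sized {
    /// Returns `None` when no cost needs to be charged for this footprint.
    fn new(footprint: F) -> Option<Self>;
    fn update(self, footprint: F) -> Option<Self>;
}

/// Charges a deposit proportional to the proposal count, but only once the
/// count exceeds half of some maximum (everything below that is free).
struct ProposalDeposit {
    amount: u64,
}

const MAX_PROPOSALS: u32 = 100;

impl Consideration<ProposalCount> for ProposalDeposit {
    fn new(footprint: ProposalCount) -> Option<Self> {
        if footprint.0 <= MAX_PROPOSALS / 2 {
            None // zero cost for small footprints
        } else {
            Some(Self { amount: footprint.0 as u64 * 10 })
        }
    }

    fn update(self, footprint: ProposalCount) -> Option<Self> {
        Self::new(footprint)
    }
}

fn main() {
    assert!(ProposalDeposit::new(ProposalCount(10)).is_none());
    assert_eq!(ProposalDeposit::new(ProposalCount(80)).map(|d| d.amount), Some(800));
    // `update` can likewise drop the charge again once the footprint shrinks.
    let ticket = ProposalDeposit::new(ProposalCount(80)).unwrap();
    assert!(ticket.update(ProposalCount(10)).is_none());
}
```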
+ +crates: + - name: frame-support + bump: major + - name: pallet-preimage + bump: major + - name: pallet-balances + bump: patch diff --git a/prdoc/1.14.0/pr_4618.prdoc b/prdoc/1.14.0/pr_4618.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3dd0fce81eeee54a411aef89442c233da204af68 --- /dev/null +++ b/prdoc/1.14.0/pr_4618.prdoc @@ -0,0 +1,20 @@ +title: Unify logic for fetching the `:code` of a block + +doc: + - audience: Node Operator + description: | + Fixes an issue on parachains when running with a custom `substitute` of the on chain wasm code + and having replaced the wasm code on the relay chain. The relay chain was rejecting blocks + build this way, because the collator was reporting the actual on chain wasm code hash + to the relay chain. However, the relay chain was expecting the code hash of the wasm code substitute + that was also registered on the relay chain. + - audience: Node Dev + description: | + `Client::code_at` will now use the same `substitute` to determine the code for a given block as it is + done when executing any runtime call. + +crates: + - name: cumulus-client-consensus-aura + bump: minor + - name: sc-service + bump: minor diff --git a/prdoc/pr_4662.prdoc b/prdoc/1.14.0/pr_4662.prdoc similarity index 100% rename from prdoc/pr_4662.prdoc rename to prdoc/1.14.0/pr_4662.prdoc diff --git a/prdoc/1.14.0/pr_4684.prdoc b/prdoc/1.14.0/pr_4684.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..b1c429c57822494cd11f75beea93c4dabf2764cb --- /dev/null +++ b/prdoc/1.14.0/pr_4684.prdoc @@ -0,0 +1,13 @@ +title: "Refactor of the parachain template" + +doc: + - audience: Runtime Dev + description: | + Introduce the construct runtime V2 to the parachain template runtime. In addition, url links in the parachain pallet + template now direct to the polkadot sdk docs. + +crates: + - name: pallet-parachain-template + bump: none + - name: parachain-template-runtime + bump: none diff --git a/prdoc/1.14.0/pr_4685.prdoc b/prdoc/1.14.0/pr_4685.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e212919ba2e5bcfede725c1afbdd27115dc5e229 --- /dev/null +++ b/prdoc/1.14.0/pr_4685.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Chain-spec-builder supports `codeSubstitutes`. + +doc: + - audience: Node Operator + description: | + A new subcommand `add-code-substitute` is available for the `chain-spec-builder` binary. It allows users to provide a runtime that should be used from a given + block onwards. The `codeSubstitutes` field in the chain spec is used to force usage of a given runtime at a given block until the next runtime upgrade. It can be + used to progress chains that are stalled due to runtime bugs that prevent block-building. However, parachain usage is only possible in combination with an updated + validation function on the relay chain. 
+ +crates: + - name: staging-chain-spec-builder + bump: minor diff --git a/prdoc/pr_4691.prdoc b/prdoc/1.14.0/pr_4691.prdoc similarity index 100% rename from prdoc/pr_4691.prdoc rename to prdoc/1.14.0/pr_4691.prdoc diff --git a/prdoc/1.14.0/pr_4710.prdoc b/prdoc/1.14.0/pr_4710.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d7d31d817208a44402c53d2941750108c250f683 --- /dev/null +++ b/prdoc/1.14.0/pr_4710.prdoc @@ -0,0 +1,11 @@ +title: "Dont partially modify HRMP pages" + +doc: + - audience: Runtime Dev + description: | + The xcmp-queue pallet now does not partially modify a page anymore when the next message does + not fully fit into it but instead cleanly creates a new one. + +crates: + - name: cumulus-pallet-xcmp-queue + bump: patch diff --git a/prdoc/1.14.0/pr_4724.prdoc b/prdoc/1.14.0/pr_4724.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3723c2a70246ad40ad139c4da6e770fd386996f4 --- /dev/null +++ b/prdoc/1.14.0/pr_4724.prdoc @@ -0,0 +1,24 @@ +title: Fix core sharing and make use of scheduling_lookahead during backing + +doc: + - audience: Node Dev + description: | + Core sharing (two or more parachains scheduled on the same core with interlaced assignments) was not working correctly. + Adds the neccessary fixes to the backing subsystems. Moreover, adds support for backing collations which are built + and advertised ahead of time (with up to `scheduling_lookahead` relay chain blocks in advance). + +crates: + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-prospective-parachains + bump: patch + - name: polkadot-collator-protocol + bump: patch + - name: polkadot-statement-distribution + bump: patch + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-runtime-parachains + bump: none + - name: polkadot + bump: none diff --git a/prdoc/1.14.0/pr_4728.prdoc b/prdoc/1.14.0/pr_4728.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..1494fbdbb2b9fb2a534e3fa47d13ee46996b180d --- /dev/null +++ b/prdoc/1.14.0/pr_4728.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Glutton - add support for bloating the parachain block length" + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + Introduce a new configuration parameter `block_length` which can be configured via a call to + `set_block_length`. This sets the ration of the block length that is to be filled with trash. + This is implemented by an inherent that takes trash data as a parameter filling the block length. 
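The `block_length` ratio in pr_4728 boils down to simple arithmetic. The sketch below uses plain parts-per-billion integers instead of the FRAME ratio types (`Perbill` or similar) to stay self-contained, and `trash_data_len` is a hypothetical helper name, not pallet-glutton code.

```rust
/// Parts-per-billion stand-in for the ratio type the pallet would use.
const PARTS: u64 = 1_000_000_000;

/// How many bytes of trash data the inherent would need to supply to bloat
/// the block to the configured fraction of the maximum block length.
fn trash_data_len(max_block_length: u64, block_length_ratio_ppb: u64, already_used: u64) -> u64 {
    let target = max_block_length * block_length_ratio_ppb / PARTS;
    target.saturating_sub(already_used)
}

fn main() {
    // Example: 5 MiB block length limit, fill 50% of it, 100 KiB already used
    // by ordinary extrinsics.
    let max = 5 * 1024 * 1024;
    let ratio = PARTS / 2; // 50%
    let used = 100 * 1024;
    assert_eq!(trash_data_len(max, ratio, used), (5 * 1024 * 1024) / 2 - 100 * 1024);
}
```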
+ +crates: + - name: pallet-glutton + bump: major + - name: glutton-westend-runtime + bump: major diff --git a/prdoc/1.14.0/pr_4730.prdoc b/prdoc/1.14.0/pr_4730.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9af14534bcbd28a600c1bb838ae6ff7f8512f563 --- /dev/null +++ b/prdoc/1.14.0/pr_4730.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: rpc upgrade jsonrpsee to v0.23.1 + +doc: + - audience: Node Dev + description: | + Upgrade the rpc library jsonrpsee to v0.23.1 to utilize: + + - Add Extensions which we now is using to get the connection id (used by the rpc spec v2) + - Update hyper to v1.0, http v1.0, soketto and related crates (hyper::service::make_service_fn is removed) + - The subscription API for the client is modified to know why a subscription was closed. + +crates: + - name: sc-rpc-spec-v2 + bump: patch + - name: sc-rpc + bump: patch + - name: sc-rpc-server + bump: patch + - name: cumulus-relay-chain-rpc-interface + bump: patch + - name: frame-remote-externalities + bump: patch diff --git a/prdoc/pr_4733.prdoc b/prdoc/1.14.0/pr_4733.prdoc similarity index 100% rename from prdoc/pr_4733.prdoc rename to prdoc/1.14.0/pr_4733.prdoc diff --git a/prdoc/1.14.0/pr_4756.prdoc b/prdoc/1.14.0/pr_4756.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..064a79fb06648a39c6983c92a80028fff093355d --- /dev/null +++ b/prdoc/1.14.0/pr_4756.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Do not make pallet-nfts benchmarks signature-dependent + +doc: + - audience: Runtime Dev + description: | + - Adds extra functionality to pallet-nfts's BenchmarkHelper to provide signers and sign message. + - Abstracts away the explicit link with Sr25519 schema in the benchmarks, allowing parachains with a different one to be able to run them and calculate the weights. + - Adds a default implementation for the empty tuple that leaves the code equivalent. + +crates: + - name: pallet-nfts + bump: minor diff --git a/prdoc/1.14.0/pr_4757.prdoc b/prdoc/1.14.0/pr_4757.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d94a20d7bb1a6ab4c79117f5f99f95eaaa1881d8 --- /dev/null +++ b/prdoc/1.14.0/pr_4757.prdoc @@ -0,0 +1,18 @@ +title: "pallet assets: optional auto-increment for the asset ID" + +doc: + - audience: Runtime Dev + description: | + Introduce an optional auto-increment setup for the IDs of new assets. + +crates: + - name: pallet-assets + bump: major + - name: staging-xcm-builder + bump: patch + - name: staging-xcm + bump: patch + - name: pallet-assets-freezer + bump: patch + - name: pallet-contracts + bump: patch diff --git a/prdoc/1.14.0/pr_4765.prdoc b/prdoc/1.14.0/pr_4765.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..f64b2fdc51ab12c59aac8a247bf69026dffb9bf1 --- /dev/null +++ b/prdoc/1.14.0/pr_4765.prdoc @@ -0,0 +1,18 @@ +title: CheckWeight - account for extrinsic len as proof size + +doc: + - audience: Runtime Dev + description: | + This changes how CheckWeight extension works. It will now account for the extrinsic length + as proof size. When `on_idle` is called, the remaining weight parameter reflects this. 
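A simplified numeric sketch of the pr_4765 change: the `Weight` struct here is a stand-in for the two-dimensional FRAME weight and `remaining_for_on_idle` is a hypothetical helper, but it shows how counting the encoded extrinsic length against the proof-size dimension shrinks what `on_idle` sees.

```rust
/// Stand-in for the two-dimensional weight used in FRAME (ref time + proof size).
#[derive(Clone, Copy)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    fn saturating_sub(self, other: Weight) -> Weight {
        Weight {
            ref_time: self.ref_time.saturating_sub(other.ref_time),
            proof_size: self.proof_size.saturating_sub(other.proof_size),
        }
    }
}

/// The remaining weight handed to `on_idle`: with the change described above,
/// the total encoded length of all extrinsics is subtracted from the proof
/// size dimension as well, not just the benchmarked dispatch weights.
fn remaining_for_on_idle(block_limit: Weight, dispatched: Weight, all_extrinsics_len: u64) -> Weight {
    block_limit
        .saturating_sub(dispatched)
        .saturating_sub(Weight { ref_time: 0, proof_size: all_extrinsics_len })
}

fn main() {
    let limit = Weight { ref_time: 2_000_000_000, proof_size: 5 * 1024 * 1024 };
    let dispatched = Weight { ref_time: 500_000_000, proof_size: 1024 * 1024 };
    let remaining = remaining_for_on_idle(limit, dispatched, 200 * 1024);
    assert_eq!(remaining.proof_size, 4 * 1024 * 1024 - 200 * 1024);
}
```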
+ +crates: + - name: frame-system + bump: patch + - name: frame-executive + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + + + diff --git a/prdoc/1.14.0/pr_4769.prdoc b/prdoc/1.14.0/pr_4769.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e9691ba6f89744f073f8b0e794706e755aa59f0a --- /dev/null +++ b/prdoc/1.14.0/pr_4769.prdoc @@ -0,0 +1,20 @@ +title: Use real rust type for pallet alias in `runtime` macro + +doc: + - audience: Runtime Dev + description: | + This PR adds the ability to use a real rust type for pallet alias in the new `runtime` macro: + ```rust + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + ``` + + Please note that the current syntax still continues to be supported. + +crates: + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: patch + - name: minimal-template-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_4799.prdoc b/prdoc/1.14.0/pr_4799.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c4e68e316c22f8d805aceb20910f31dd3f7703fe --- /dev/null +++ b/prdoc/1.14.0/pr_4799.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "network: Upgrade `litep2p` to v0.6.0" + +doc: + - audience: Node Operator + description: | + This PR brings the latest `litep2p` v0.6.0 to polkadot-sdk with stability improvements, + security fixes, and performance optimizations. + + Specifically: + - Incoming DHT records are now validated also with experimental litep2p network backend. + - Performance of TCP & WebSocket connections improved by setting `TCP_NODELAY` flag. + - Stability of secondary connection establishment improved. + - Multiple possible panics in litep2p library eliminated. + +crates: + - name: sc-authority-discovery + bump: patch + - name: sc-network + bump: patch + - name: sc-network-types + bump: patch diff --git a/prdoc/1.14.0/pr_4802.prdoc b/prdoc/1.14.0/pr_4802.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5757c4cbae184444d4eb78b620bcef50a3b0e133 --- /dev/null +++ b/prdoc/1.14.0/pr_4802.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add `health/readiness endpoint` to the rpc server + +doc: + - audience: Node Operator + description: | + Add `/health/readiness endpoint` to the rpc server which returns HTTP status code 200 if the chain is synced + and can connect to the rest of the network otherwise status code 500 is returned. + The endpoint can be reached by performing a HTTP GET request to the + endpoint such as `$ curl /health/readiness` + +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/1.14.0/pr_4807.prdoc b/prdoc/1.14.0/pr_4807.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..b60bfb524510a17f50df19fbbb985dc0d263a9d6 --- /dev/null +++ b/prdoc/1.14.0/pr_4807.prdoc @@ -0,0 +1,11 @@ +title: "pallet ranked collective: max member count per rank" + +doc: + - audience: Runtime Dev + description: | + Configuration for the maximum member count per rank, with the option for no limit. 
+ +crates: + - name: pallet-ranked-collective + bump: major + diff --git a/prdoc/1.14.0/pr_4823.prdoc b/prdoc/1.14.0/pr_4823.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a498b33f7bfa949078ca23ac196116b7f5b2f1f9 --- /dev/null +++ b/prdoc/1.14.0/pr_4823.prdoc @@ -0,0 +1,11 @@ +title: "`pallet-referenda`: Ensure to schedule referendas earliest at the next block" + +doc: + - audience: Runtime User + description: | + Ensure that referendas are scheduled earliest at the next block when they are enacted. + Otherwise the scheduling may fails and thus, the enactment of the referenda. + +crates: + - name: pallet-referenda + bump: patch diff --git a/prdoc/1.14.0/pr_4831.prdoc b/prdoc/1.14.0/pr_4831.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8629ead6e81d80306bb69cf57ed14cea49313010 --- /dev/null +++ b/prdoc/1.14.0/pr_4831.prdoc @@ -0,0 +1,25 @@ +title: "treasury pallet: - remove unused config parameters" + +doc: + - audience: Runtime Dev + description: | + Remove unused config parameters `ApproveOrigin` and `OnSlash` from the treasury pallet. + Add `OnSlash` config parameter to the bounties and tips pallets. + +crates: + - name: pallet-treasury + bump: major + - name: pallet-bounties + bump: major + - name: pallet-tips + bump: major + - name: collectives-westend-runtime + bump: patch + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: kitchensink-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_4833.prdoc b/prdoc/1.14.0/pr_4833.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a5cf853696eee38fc2659a3357eb7be7fe5f860a --- /dev/null +++ b/prdoc/1.14.0/pr_4833.prdoc @@ -0,0 +1,12 @@ +title: "Reinitialize should allow to override existing config in collationGeneration" + +doc: + - audience: Node Dev + description: | + The Reinitialize collationGeneration subsystem message currently fails if no other config is already set. + As it is difficult to query the collationGeneration subsystem to check when to call Initialize or Reinitialize, this PR + proposes that Reinitialize overrides the configuration regardless if there was one already set. + +crates: + - name: polkadot-node-collation-generation + bump: minor diff --git a/prdoc/1.14.0/pr_4844.prdoc b/prdoc/1.14.0/pr_4844.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..999e63c84ed9a0491c6b4683a5ed3d5b25efa8c2 --- /dev/null +++ b/prdoc/1.14.0/pr_4844.prdoc @@ -0,0 +1,34 @@ +title: Make `Verifier::verify` and `BlockImport::check_block` use `&self` instead of `&mut self` + +doc: + - audience: Node Dev + description: | + `Verifier::verify` and `BlockImport::check_block` were refactored to use `&self` instead of `&mut self` + because there is no fundamental requirement for those operations to be exclusive in nature. 
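A schematic before/after of the pr_4844 receiver change; the trait and types below are simplified stand-ins, not the real `sc-consensus` definitions. The point is that taking `&self` lets a verifier be shared (for example behind an `Arc`) without a lock.

```rust
use std::sync::Arc;

/// Simplified stand-in for a block to be verified.
struct Block;

/// After the change: verification only needs shared access, so the trait
/// method takes `&self` (previously it required `&mut self`).
trait Verifier: Send + Sync {
    fn verify(&self, block: Block) -> Result<(), String>;
}

struct MyVerifier;

impl Verifier for MyVerifier {
    fn verify(&self, _block: Block) -> Result<(), String> {
        // Stateless check; nothing here needs exclusive access.
        Ok(())
    }
}

fn main() {
    // With `&self`, the verifier can be shared behind an `Arc` and used from
    // several places concurrently without wrapping it in a mutex.
    let verifier: Arc<dyn Verifier> = Arc::new(MyVerifier);
    let v2 = Arc::clone(&verifier);
    let handle = std::thread::spawn(move || v2.verify(Block));
    assert!(verifier.verify(Block).is_ok());
    assert!(handle.join().unwrap().is_ok());
}
```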
+ +crates: +- name: sc-consensus + bump: major + validate: false +- name: sc-consensus-aura + bump: major +- name: sc-consensus-babe + bump: major +- name: sc-consensus-beefy + bump: major +- name: sc-consensus-grandpa + bump: major +- name: sc-consensus-manual-seal + bump: major +- name: sc-consensus-pow + bump: major +- name: sc-service + bump: major +- name: cumulus-client-consensus-common + bump: major +- name: cumulus-client-consensus-aura + bump: major +- name: cumulus-client-consensus-relay-chain + bump: major +- name: polkadot-parachain-bin + validate: false diff --git a/prdoc/1.14.0/pr_4857.prdoc b/prdoc/1.14.0/pr_4857.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d515e4257622e046c42b4de2638541397463374a --- /dev/null +++ b/prdoc/1.14.0/pr_4857.prdoc @@ -0,0 +1,50 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[xcm] runtime api for LocationToAccount conversions" + +doc: + - audience: Runtime Dev + description: | + Introduces a new runtime API to help with conversions of XCM `Location` to the runtime's `AccountId`, + showing an Ss58 formatted address for easier verification. + + Besides that, the `xcm-fee-payment-runtime-api` module was merged into the new `xcm-runtime-apis`. + If you are using the `xcm-fee-payment-runtime-api` dependency, you just need to change it to `xcm-runtime-apis` + and update the imports from `use xcm_fee_payment_runtime_api::*` to `use xcm_runtime_apis::*`. + +crates: + - name: xcm-runtime-apis + bump: none + - name: polkadot-sdk + bump: patch + - name: pallet-xcm + bump: patch + - name: polkadot-service + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: none diff --git a/prdoc/1.14.0/pr_4865.prdoc b/prdoc/1.14.0/pr_4865.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..48ffd04219cf5ab48bb45bae7449a0176d28ec7e --- /dev/null +++ b/prdoc/1.14.0/pr_4865.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement trait `ContainsLengthBound` for pallet-membership + +doc: + - audience: Runtime Dev + description: | + Implement trait ContainsLengthBound for pallet membership otherwise we can't use it with pallet-tips without wrapper + +crates: + - name: pallet-membership + bump: minor diff --git a/prdoc/1.14.0/pr_4877.prdoc b/prdoc/1.14.0/pr_4877.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ede536aee450257aaff732870f01d1dfa9106e49 --- /dev/null +++ b/prdoc/1.14.0/pr_4877.prdoc @@ -0,0 +1,13 @@ +title: "Core-Fellowship: new promote_fast call" + +doc: + - audience: Runtime User + description: | + Adds the ability to quickly promote someone within a collective by bypassing the promotion + cooldown. 
This can help in special situations and comes with a new origin: `FastPromoteOrigin`. + +crates: + - name: pallet-core-fellowship + bump: major + - name: collectives-westend-runtime + bump: major diff --git a/prdoc/pr_3286.prdoc b/prdoc/pr_3286.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6ec3f6552a4a7369d1881e621e3070460d0b344f --- /dev/null +++ b/prdoc/pr_3286.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Assets: can_decrease/increase for destroying asset is not successful" + +doc: + - audience: Runtime Dev + description: | + Functions `can_decrease` and `can_increase` do not return successful consequence results + for assets undergoing destruction; instead, they return the `UnknownAsset` consequence variant. + This update aligns their behavior with similar functions, such as `reducible_balance`, + `increase_balance`, `decrease_balance`, and `burn`, which return an `AssetNotLive` error + for assets in the process of being destroyed. + +crates: + - name: pallet-assets diff --git a/prdoc/pr_4097.prdoc b/prdoc/pr_4097.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2804a9571c79e5449111affb2e3117f635e0051d --- /dev/null +++ b/prdoc/pr_4097.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce experimental slot-based collator + +doc: + - audience: Node Operator + description: | + Introduces an experimental collator that is fit fot elastic-scaling. + It can be activated on `test-parachain` and `polkadot-parachain` binaries via + `--experimental-use-slot-based` flag. The current implementation is MVP status and purely + for testing. Behaviour can change any time and should not be relied upon in environments with + any stability requirements. + +crates: + - name: cumulus-client-consensus-aura + bump: major + - name: cumulus-client-consensus-common + bump: minor + - name: cumulus-client-pov-recovery + bump: none + validate: false + - name: cumulus-pallet-aura-ext + bump: patch + - name: cumulus-relay-chain-interface + bump: major + validate: false + - name: sc-consensus-slots + bump: minor + - name: sc-basic-authorship + bump: patch + - name: cumulus-client-network + bump: none + validate: false + - name: cumulus-relay-chain-inprocess-interface + bump: minor + - name: sc-consensus-aura + bump: patch + - name: cumulus-relay-chain-rpc-interface + bump: minor + - name: polkadot-parachain-bin + bump: patch + - name: polkadot + bump: none + validate: false diff --git a/prdoc/pr_4522.prdoc b/prdoc/pr_4522.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c8fdcfa51a419665ea00ad37051994e73089295b --- /dev/null +++ b/prdoc/pr_4522.prdoc @@ -0,0 +1,39 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Added runtime support for reporting BEEFY fork voting + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + This PR adds the `report_fork_voting`, `report_future_voting` extrinsics to `pallet-beefy` + and renames the `report_equivocation` extrinsic to `report_double_voting`. + `report_fork_voting` can't be called yet, since it uses `Weight::MAX` weight. 
We will + add benchmarks for it and set the proper weight in a future PR. + Also a new `AncestryHelper` associated trait was added to `pallet_beefy::Config`. + - audience: Node Dev + description: | + This PR renames the `submit_report_equivocation_unsigned_extrinsic` in `BeefyApi` to + `submit_report_double_voting_unsigned_extrinsic`and bumps the `BeefyApi` version from 3 to 4. + +crates: + - name: pallet-beefy + bump: major + - name: pallet-beefy-mmr + bump: minor + - name: pallet-mmr + bump: major + - name: sc-consensus-beefy + bump: patch + - name: kitchensink-runtime + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: sp-consensus-beefy + bump: major + - name: polkadot-service + bump: patch diff --git a/prdoc/pr_4563.prdoc b/prdoc/pr_4563.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3780eee5898b555f44f9d7c7d6670e2c06ff4702 --- /dev/null +++ b/prdoc/pr_4563.prdoc @@ -0,0 +1,12 @@ +title: Try State Hook for Bounties. + +doc: + - audience: Runtime User + description: | + Invariants for storage items in the bounties pallet. Enforces the following Invariants: + 1.`BountyCount` should be greater or equals to the length of the number of items in `Bounties`. + 2.`BountyCount` should be greater or equals to the length of the number of items in `BountyDescriptions`. + 3. Number of items in `Bounties` should be the same as `BountyDescriptions` length. +crates: +- name: pallet-bounties + bump: minor diff --git a/prdoc/pr_4738.prdoc b/prdoc/pr_4738.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..751f318e64f34355f009e6105f9f9d4d657ab1f2 --- /dev/null +++ b/prdoc/pr_4738.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add CheckMetadata SignedExtension to Rococo and Westend Coretime chains + +doc: + - audience: Runtime User + description: | + This brings support for the new Ledger app and similar hardware wallets to the Coretime + Chain on Rococo and Westend. These hardware wallets will be able to decode the transaction + using the metadata. The runtime will ensure that the metadata used for this decoding process + is correct and that the online wallet did not try to trick you. + +crates: + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major diff --git a/prdoc/pr_4777.prdoc b/prdoc/pr_4777.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..07fa8decebe08bfaad0fe0c621bbda8e016502ab --- /dev/null +++ b/prdoc/pr_4777.prdoc @@ -0,0 +1,27 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCM builder pattern allows clear_origin before buy_execution. + +doc: + - audience: Runtime Dev + description: | + Added clear_origin as an allowed command after commands that load the holdings register, in the safe xcm builder. + Previously, although it's logically allowed, an XCM could not be built like this: + ```rust + let xcm = Xcm::builder() + .withdraw_asset((Parent, 100u128)) + .clear_origin() + .buy_execution((Parent, 1u128)) + .deposit_asset(All, [0u8; 32]) + .build(); + ``` + You had to use the unsafe_builder. + Now, it's allowed using the default builder. 
+ +crates: +- name: "xcm-procedural" + bump: minor +- name: "staging-xcm" + bump: minor + diff --git a/prdoc/pr_4839.prdoc b/prdoc/pr_4839.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..84bb393d4c45425f2dd691e16668957d45813313 --- /dev/null +++ b/prdoc/pr_4839.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-insecure-randomness-collective-flip + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-insecure-randomness-collective-flip`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-insecure-randomness-collective-flip + bump: patch diff --git a/prdoc/pr_4840.prdoc b/prdoc/pr_4840.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..265e1f41c3f384c1c0aa3721ec0727d63b7cd96e --- /dev/null +++ b/prdoc/pr_4840.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-membership + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-membership`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-membership + bump: minor \ No newline at end of file diff --git a/prdoc/pr_4863.prdoc b/prdoc/pr_4863.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..eb43b67a45c5c10cda58e84ff94d6e4815e888b4 --- /dev/null +++ b/prdoc/pr_4863.prdoc @@ -0,0 +1,10 @@ +title: "Make `tracing::log` work in the runtime" + +doc: + - audience: Runtime Dev + description: | + Make `tracing::log` work in the runtime as `log` works in the runtime. + +crates: + - name: sp-runtime + bump: patch diff --git a/prdoc/pr_4871.prdoc b/prdoc/pr_4871.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6ff36f59d70085f3dfff23678b9d64946d4f75de --- /dev/null +++ b/prdoc/pr_4871.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-tips + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-tips`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + +crates: + - name: pallet-tips + bump: minor diff --git a/prdoc/pr_4885.prdoc b/prdoc/pr_4885.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..50dc31bc1b8fad8886a7ca39a1f4c7fda7e5592e --- /dev/null +++ b/prdoc/pr_4885.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-transaction-storage + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-transaction-storage`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. 
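Several prdocs above repeat the same migration note; the turbofish in them is presumably `StorageItem::<T>::get()` (the `<T>` appears to have been lost in rendering). A plain-Rust analogue of the pattern, with made-up `Now`/`Config`/`Runtime` types rather than real pallet code, is:

```rust
use std::marker::PhantomData;

/// Stand-in for a pallet's `Config` trait.
trait Config {}

/// Plain-Rust analogue of a `#[pallet::storage]` item such as `Now<T>`.
#[allow(dead_code)]
struct Now<T>(PhantomData<T>);

impl<T: Config> Now<T> {
    /// Storage items already expose `get`; the removed `#[pallet::getter]`
    /// only generated a thin wrapper around exactly this kind of call.
    fn get() -> u64 {
        42
    }
}

struct Runtime;
impl Config for Runtime {}

fn main() {
    // After the getter removal, callers use the storage item directly:
    let now = Now::<Runtime>::get();
    assert_eq!(now, 42);
}
```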
+ +crates: + - name: pallet-transaction-storage + bump: minor diff --git a/prdoc/pr_4888.prdoc b/prdoc/pr_4888.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e8cfb25d924d23fe05a44b6a7f076f70dc999e68 --- /dev/null +++ b/prdoc/pr_4888.prdoc @@ -0,0 +1,35 @@ +title: "Allow any asset over the bridge lane between the two Asset Hubs" + +doc: + - audience: Runtime User + description: | + Allow all Rococo-native, Westend-native and Ethereum-native assets to flow over + the bridge between the Rococo and Westend AssetHubs. + + On Rococo Asset Hub, we allow Westend Asset Hub to act as reserve for any asset + native to the Westend ecosystem. + We also allow Ethereum contracts to act as reserves for the foreign assets + identified by the same respective contracts locations (on the other side of Snowbridge). + + On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset + native to the Rococo or Ethereum ecosystems (practically providing Westend access + to Ethereum assets through double bridging: Ethereum <> Rococo <> Westend). + +crates: + - name: assets-common + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: asset-hub-rococo-emulated-chain + bump: minor + - name: asset-hub-rococo-integration-tests + bump: minor + - name: bridge-hub-rococo-integration-tests + bump: minor + - name: bridge-hub-westend-integration-tests + bump: minor + - name: emulated-integration-tests-common + bump: minor + diff --git a/prdoc/pr_4902.prdoc b/prdoc/pr_4902.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..19fe168a74abe316ee7e5acae7f1dfd0912c1486 --- /dev/null +++ b/prdoc/pr_4902.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-vesting + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-vesting`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + +crates: + - name: pallet-vesting + bump: minor diff --git a/prdoc/pr_4912.prdoc b/prdoc/pr_4912.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..dd96054b81fa3853d695b4baa4c9474ec4ea8338 --- /dev/null +++ b/prdoc/pr_4912.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-babe + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-babe`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + When accessed outside the pallet, use the public functions of storage. + +crates: + - name: pallet-babe + bump: minor diff --git a/prdoc/pr_4922.prdoc b/prdoc/pr_4922.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2e2dd26947c0d486fa92023c3021cdcb52128938 --- /dev/null +++ b/prdoc/pr_4922.prdoc @@ -0,0 +1,15 @@ +title: Optimize finalization performance + +doc: + - audience: Node Dev + description: | + Finalization algorithm was replaced with a more efficient version, data structures refactored to be faster and do + fewer memory allocations. As the result some APIs have changed in a minor, but incompatible way. 
+ +crates: +- name: sc-client-api + bump: major +- name: sc-client-db + bump: major +- name: sp-blockchain + bump: major diff --git a/prdoc/pr_4932.prdoc b/prdoc/pr_4932.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..94af00d9249eb15c9b9c372cfb96a6a7f48862bc --- /dev/null +++ b/prdoc/pr_4932.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove relay-chain consensus authoring support for asset-hub chains from polkadot-parachain. + +doc: + - audience: Node Operator + description: | + The polkadot-parachain node had special handling for asset-hub parachains. They started out + using relay-chain consensus and later migrated to Aura as soon as it became available. The codepath for authoring + with relay chain consensus has been removed, since all asset hub chains have long migrated. + +crates: + - name: polkadot-parachain-bin + bump: major diff --git a/prdoc/pr_4935.prdoc b/prdoc/pr_4935.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2b06899b63398353209eb38ad5410e24fc3e4f6d --- /dev/null +++ b/prdoc/pr_4935.prdoc @@ -0,0 +1,75 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bridges V2 refactoring backport and `pallet_bridge_messages` simplifications" + +doc: + - audience: Runtime Dev + description: | + This introduces several simplifications to the pallet_bridge_messages::Config configuration. + Types like `BridgedChainId`, `MaxUnrewardedRelayerEntriesAtInboundLane`, `MaxUnconfirmedMessagesAtInboundLane`, `MaximalOutboundPayloadSize`, + `InboundRelayer`, `TargetHeaderChain`, and `SourceHeaderChain` were removed. + Now, you only need to provide specific bridging chain configurations for `ThisChain`, `BridgedChain`, and `BridgedHeaderChain`. 
+ + If you previously specified implementations for the bp_runtime::Chain* traits, those will fit here exactly, for example: + ``` + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >; + ``` + +crates: + - name: pallet-bridge-messages + bump: major + - name: bridge-runtime-common + bump: major + - name: bp-header-chain + bump: major + - name: bp-runtime + bump: major + - name: bp-messages + bump: major + - name: bp-polkadot-core + bump: patch + - name: bp-bridge-hub-kusama + bump: minor + - name: bp-bridge-hub-polkadot + bump: minor + - name: bp-bridge-hub-rococo + bump: minor + - name: bp-bridge-hub-westend + bump: minor + - name: bp-kusama + bump: minor + - name: bp-polkadot + bump: minor + - name: bp-polkadot-bulletin + bump: minor + - name: bp-rococo + bump: minor + - name: bp-test-utils + bump: patch + - name: bp-westend + bump: minor + - name: bridge-hub-test-utils + bump: major + - name: pallet-bridge-grandpa + bump: patch + - name: pallet-bridge-parachains + bump: patch + - name: pallet-bridge-relayers + bump: patch + - name: pallet-xcm-bridge-hub + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major diff --git a/prdoc/pr_4943.prdoc b/prdoc/pr_4943.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..705325126060b8aa7eeab33ff2a03deaaa8b2919 --- /dev/null +++ b/prdoc/pr_4943.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Update definition of frozen balance (docs PR) + +doc: + - audience: Runtime Dev + description: | + This PR fixes a bug in the docs located in the definition of frozen balances. In addition, it extends that definition for completeness. + +crates: +- name: frame-support + bump: patch \ No newline at end of file diff --git a/prdoc/pr_4978.prdoc b/prdoc/pr_4978.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..1f86d512f2c78aa3910ace03a1216eb04faf517b --- /dev/null +++ b/prdoc/pr_4978.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add MAX_INSTRUCTIONS_TO_DECODE to XCMv2 + +doc: + - audience: Runtime User + description: | + Added a max number of instructions to XCMv2. If using XCMv2, you'll have to take this limit into account. + It was set to 100. + - audience: Runtime Dev + description: | + Added a max number of instructions to XCMv2. If using XCMv2, you'll have to take this limit into account. + It was set to 100. + +crates: + - name: staging-xcm + bump: minor diff --git a/scripts/bench-all.sh b/scripts/bench-all.sh new file mode 100755 index 0000000000000000000000000000000000000000..e5512e26bbad75248f2983a2e3a653e3d1328435 --- /dev/null +++ b/scripts/bench-all.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +get_arg optional --pallet "$@" +PALLET="${out:-""}" + +if [[ ! -z "$PALLET" ]]; then + . 
"$(dirname "${BASH_SOURCE[0]}")/lib/bench-all-pallet.sh" "$@" +else + . "$(dirname "${BASH_SOURCE[0]}")/bench.sh" --subcommand=all "$@" +fi diff --git a/scripts/bench.sh b/scripts/bench.sh new file mode 100755 index 0000000000000000000000000000000000000000..2f4ef7ec6a14118630964a835d181d9631a0c605 --- /dev/null +++ b/scripts/bench.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Initially based on https://github.com/paritytech/bench-bot/blob/cd3b2943d911ae29e41fe6204788ef99c19412c3/bench.js + +# Most external variables used in this script, such as $GH_CONTRIBUTOR, are +# related to https://github.com/paritytech/try-runtime-bot + +# This script relies on $GITHUB_TOKEN which is probably a protected GitLab CI +# variable; if this assumption holds true, it is implied that this script should +# be ran only on protected pipelines + +set -eu -o pipefail +shopt -s inherit_errexit + +# realpath allows to reuse the current +BENCH_ROOT_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")") + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +repository_name="$(basename "$PWD")" + +get_arg optional --target_dir "$@" +target_dir="${out:-""}" + +get_arg optional --noexit "$@" +noexit="${out:-""}" + +output_path="." + +profile="production" + +if [[ "$repository_name" == "polkadot-sdk" ]]; then + output_path="./$target_dir" +fi + +cargo_run_benchmarks="cargo run --quiet --profile=${profile}" + +echo "Repository: $repository_name" +echo "Target Dir: $target_dir" +echo "Output Path: $output_path" + +cargo_run() { + echo "Running $cargo_run_benchmarks" "${args[@]}" + + # if not patched with PATCH_something=123 then use --locked + if [[ -z "${BENCH_PATCHED:-}" ]]; then + cargo_run_benchmarks+=" --locked" + fi + + $cargo_run_benchmarks "${args[@]}" +} + + +main() { + + # Remove the "github" remote since the same repository might be reused by a + # GitLab runner, therefore the remote might already exist from a previous run + # in case it was not cleaned up properly for some reason + &>/dev/null git remote remove github || : + + tmp_dirs=() + cleanup() { + exit_code=$? + # Clean up the "github" remote at the end since it contains the + # $GITHUB_TOKEN secret, which is only available for protected pipelines on + # GitLab + &>/dev/null git remote remove github || : + rm -rf "${tmp_dirs[@]}" + echo "Done, exit: $exit_code" + exit $exit_code + } + + # avoid exit if --noexit is passed + if [ -z "$noexit" ]; then + trap cleanup EXIT + fi + + # set -x + + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + case "$subcommand" in + runtime|pallet|xcm) + echo 'Running bench_pallet' + . "$BENCH_ROOT_DIR/lib/bench-pallet.sh" "$@" + ;; + overhead) + echo 'Running bench_overhead' + . "$BENCH_ROOT_DIR/lib/bench-overhead.sh" "$@" + ;; + all) + echo "Running all-$target_dir" + . "$BENCH_ROOT_DIR/lib/bench-all-${target_dir}.sh" "$@" + ;; + *) + die "Invalid subcommand $subcommand to process_args" + ;; + esac + + # set +x + + # in case we used diener to patch some dependency during benchmark execution, + # revert the patches so that they're not included in the diff + git checkout --quiet HEAD Cargo.toml + + # Save the generated weights to GitLab artifacts in case commit+push fails + echo "Showing weights diff for command" + git diff -P | tee -a "${ARTIFACTS_DIR}/weights.patch" + echo "Wrote weights patch to \"${ARTIFACTS_DIR}/weights.patch\"" + + + # instead of using `cargo run --locked`, we allow the Cargo files to be updated + # but avoid committing them. 
It is so `cmd_runner_apply_patches` can work + git restore --staged Cargo.* +} + +main "$@" diff --git a/scripts/command-utils.sh b/scripts/command-utils.sh new file mode 100644 index 0000000000000000000000000000000000000000..252e4c86480e6b259af8a46038e1b9e0658e70fb --- /dev/null +++ b/scripts/command-utils.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +if [ "${LOADED_UTILS_SH:-}" ]; then + return +else + export LOADED_UTILS_SH=true +fi + +export ARTIFACTS_DIR="$PWD/.git/.artifacts" + +die() { + if [ "${1:-}" ]; then + >&2 echo "$1" + fi + exit 1 +} + +get_arg() { + local arg_type="$1" + shift + + local is_required + case "$arg_type" in + required|required-many) + is_required=true + ;; + optional|optional-many) ;; + *) + die "Invalid is_required argument \"$2\" in get_arg" + ;; + esac + + local has_many_values + if [ "${arg_type: -6}" == "-many" ]; then + has_many_values=true + fi + + local option_arg="$1" + shift + + local args=("$@") + + unset out + out=() + + local get_next_arg + for arg in "${args[@]}"; do + if [ "${get_next_arg:-}" ]; then + out+=("$arg") + unset get_next_arg + if [ ! "${has_many_values:-}" ]; then + break + fi + # --foo=bar (get the value after '=') + elif [ "${arg:0:$(( ${#option_arg} + 1 ))}" == "$option_arg=" ]; then + out+=("${arg:$(( ${#option_arg} + 1 ))}") + if [ ! "${has_many_values:-}" ]; then + break + fi + # --foo bar (get the next argument) + elif [ "$arg" == "$option_arg" ]; then + get_next_arg=true + fi + done + + # arg list ended with --something but no argument was provided next + if [ "${get_next_arg:-}" ]; then + die "Expected argument after \"${args[-1]}"\" + fi + + if [ "${out[0]:-}" ]; then + if [ ! "${has_many_values:-}" ]; then + out="${out[0]}" + fi + elif [ "${is_required:-}" ]; then + die "Argument $option_arg is required, but was not found" + else + unset out + fi +} diff --git a/scripts/lib/bench-all-cumulus.sh b/scripts/lib/bench-all-cumulus.sh new file mode 100755 index 0000000000000000000000000000000000000000..f4c2a35c6b6b7ed8d30fa058666453f5c321b8e1 --- /dev/null +++ b/scripts/lib/bench-all-cumulus.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# originally moved from https://github.com/paritytech/cumulus/blob/445f9277ab55b4d930ced4fbbb38d27c617c6658/scripts/benchmarks-ci.sh + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +POLKADOT_PARACHAIN="./target/$profile/polkadot-parachain" + +run_cumulus_bench() { + local artifactsDir="$ARTIFACTS_DIR" + local category=$1 + local runtimeName=$2 + local paraId=${3:-} + + local benchmarkOutput="$output_path/parachains/runtimes/$category/$runtimeName/src/weights" + local benchmarkRuntimeChain + if [[ ! -z "$paraId" ]]; then + benchmarkRuntimeChain="${runtimeName}-dev-$paraId" + else + benchmarkRuntimeChain="$runtimeName-dev" + fi + + local benchmarkMetadataOutputDir="$artifactsDir/$runtimeName" + mkdir -p "$benchmarkMetadataOutputDir" + + # Load all pallet names in an array. + echo "[+] Listing pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain ..." 
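+ # The `--list` output is parsed below: drop the header row (tail -n+2), keep the
+ # first comma-separated column (the pallet name), then de-duplicate with sort | uniq.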
+ local pallets=($( + $POLKADOT_PARACHAIN benchmark pallet --list --chain="${benchmarkRuntimeChain}" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq + )) + + if [ ${#pallets[@]} -ne 0 ]; then + echo "[+] Benchmarking ${#pallets[@]} pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain, pallets:" + for pallet in "${pallets[@]}"; do + echo " [+] $pallet" + done + else + echo "$runtimeName pallet list not found in benchmarks-ci.sh" + exit 1 + fi + + for pallet in "${pallets[@]}"; do + # (by default) do not choose output_file, like `pallet_assets.rs` because it does not work for multiple instances + # `benchmark pallet` command will decide the output_file name if there are multiple instances + local output_file="" + local extra_args="" + # a little hack for pallet_xcm_benchmarks - we want to force custom implementation for XcmWeightInfo + if [[ "$pallet" == "pallet_xcm_benchmarks::generic" ]] || [[ "$pallet" == "pallet_xcm_benchmarks::fungible" ]]; then + output_file="xcm/${pallet//::/_}.rs" + extra_args="--template=$output_path/templates/xcm-bench-template.hbs" + fi + $POLKADOT_PARACHAIN benchmark pallet \ + $extra_args \ + --chain="${benchmarkRuntimeChain}" \ + --wasm-execution=compiled \ + --pallet="$pallet" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic='*' \ + --steps=50 \ + --repeat=20 \ + --json \ + --header="$output_path/file_header.txt" \ + --output="${benchmarkOutput}/${output_file}" >> "$benchmarkMetadataOutputDir/${pallet//::/_}_benchmark.json" + done +} + + +echo "[+] Compiling benchmarks..." +cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot-parachain-bin + +# Run benchmarks for all pallets of a given runtime if runtime argument provided +get_arg optional --runtime "$@" +runtime="${out:-""}" + +if [[ $runtime ]]; then + paraId="" + case "$runtime" in + asset-*) + category="assets" + ;; + collectives-*) + category="collectives" + ;; + coretime-*) + category="coretime" + ;; + bridge-*) + category="bridge-hubs" + ;; + contracts-*) + category="contracts" + ;; + people-*) + category="people" + ;; + glutton-*) + category="glutton" + paraId="1300" + ;; + *) + echo "Unknown runtime: $runtime" + exit 1 + ;; + esac + + run_cumulus_bench $category $runtime $paraId + +else # run all + # Assets + run_cumulus_bench assets asset-hub-rococo + run_cumulus_bench assets asset-hub-westend + + # Collectives + run_cumulus_bench collectives collectives-westend + + # Coretime + run_cumulus_bench coretime coretime-rococo + run_cumulus_bench coretime coretime-westend + + # People + run_cumulus_bench people people-rococo + run_cumulus_bench people people-westend + + # Bridge Hubs + run_cumulus_bench bridge-hubs bridge-hub-rococo + run_cumulus_bench bridge-hubs bridge-hub-westend + + # Glutton + run_cumulus_bench glutton glutton-westend 1300 +fi diff --git a/scripts/lib/bench-all-pallet.sh b/scripts/lib/bench-all-pallet.sh new file mode 100644 index 0000000000000000000000000000000000000000..e6908045ddbd7f34ba7cad0e4c102e4606dff7aa --- /dev/null +++ b/scripts/lib/bench-all-pallet.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(dirname "${BASH_SOURCE[0]}")/../command-utils.sh" + +get_arg required --pallet "$@" +PALLET="${out:-""}" + +REPO_NAME="$(basename "$PWD")" +BASE_COMMAND="$(dirname "${BASH_SOURCE[0]}")/../../bench/bench.sh --noexit=true --subcommand=pallet" + +WEIGHT_FILE_PATHS=( $(find . 
-type f -name "${PALLET}.rs" -path "**/weights/*" | sed 's|^\./||g') ) + +# convert pallet_ranked_collective to ranked-collective +CLEAN_PALLET=$(echo $PALLET | sed 's/pallet_//g' | sed 's/_/-/g') + +# add substrate pallet weights to a list +SUBSTRATE_PALLET_PATH=$(ls substrate/frame/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${SUBSTRATE_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$SUBSTRATE_PALLET_PATH") +fi + +# add trappist pallet weights to a list +TRAPPIST_PALLET_PATH=$(ls pallet/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${TRAPPIST_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$TRAPPIST_PALLET_PATH") +fi + +COMMANDS=() + +if [ "${#WEIGHT_FILE_PATHS[@]}" -eq 0 ]; then + echo "No weights files found for pallet: $PALLET" + exit 1 +else + echo "Found weights files for pallet: $PALLET" +fi + +for f in ${WEIGHT_FILE_PATHS[@]}; do + echo "- $f" + # f examples: + # cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + # polkadot/runtime/rococo/src/weights/pallet_balances.rs + # runtime/trappist/src/weights/pallet_assets.rs + TARGET_DIR=$(echo $f | cut -d'/' -f 1) + + if [ "$REPO_NAME" == "polkadot-sdk" ]; then + case $TARGET_DIR in + cumulus) + TYPE=$(echo $f | cut -d'/' -f 2) + # Example: cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + if [ "$TYPE" == "parachains" ]; then + RUNTIME=$(echo $f | cut -d'/' -f 5) + RUNTIME_DIR=$(echo $f | cut -d'/' -f 4) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --runtime_dir=$RUNTIME_DIR --target_dir=$TARGET_DIR --pallet=$PALLET") + fi + ;; + polkadot) + # Example: polkadot/runtime/rococo/src/weights/pallet_balances.rs + RUNTIME=$(echo $f | cut -d'/' -f 3) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --target_dir=$TARGET_DIR --pallet=$PALLET") + ;; + substrate) + # Example: substrate/frame/contracts/src/weights.rs + COMMANDS+=("$BASE_COMMAND --target_dir=$TARGET_DIR --runtime=dev --pallet=$PALLET") + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + ;; + esac + fi + + if [ "$REPO_NAME" == "trappist" ]; then + case $TARGET_DIR in + runtime) + TYPE=$(echo $f | cut -d'/' -f 2) + if [[ "$TYPE" == "trappist" || "$TYPE" == "stout" ]]; then + # Example: runtime/trappist/src/weights/pallet_assets.rs + COMMANDS+=("$BASE_COMMAND --target_dir=trappist --runtime=$TYPE --pallet=$PALLET") + fi + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + ;; + esac + fi +done + +for cmd in "${COMMANDS[@]}"; do + echo "Running command: $cmd" + . $cmd +done diff --git a/scripts/lib/bench-all-polkadot.sh b/scripts/lib/bench-all-polkadot.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac52e00140e38de76fe082aa3fd0aaf670b465bb --- /dev/null +++ b/scripts/lib/bench-all-polkadot.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +# Runs all benchmarks for all pallets, for a given runtime, provided via the --runtime argument +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/polkadot/pull/6508/files +# original source: https://github.com/paritytech/polkadot/blob/b9842c4b52f6791fef6c11ecd020b22fe614f041/scripts/run_all_benches.sh + +get_arg required --runtime "$@" +runtime="${out:-""}" + +# default RUST_LOG is error, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling benchmarks..." +cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot + +POLKADOT_BIN="./target/$profile/polkadot" + +# Update the block and extrinsic overhead weights.
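+# `benchmark overhead` executes empty blocks and no-op extrinsics to derive the base block and
+# extrinsic weights; the generated files (typically block_weights.rs and extrinsic_weights.rs)
+# land in the directory given by --weight-path.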
+echo "[+] Benchmarking block and extrinsic overheads..." +OUTPUT=$( + $POLKADOT_BIN benchmark overhead \ + --chain="${runtime}-dev" \ + --wasm-execution=compiled \ + --weight-path="$output_path/runtime/${runtime}/constants/src/weights/" \ + --warmup=10 \ + --repeat=100 \ + --header="$output_path/file_header.txt" +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + + +# Load all pallet names in an array. +PALLETS=($( + $POLKADOT_BIN benchmark pallet --list --chain="${runtime}-dev" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +echo "[+] Benchmarking ${#PALLETS[@]} pallets for runtime $runtime" + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" +# Delete the error file before each run. +rm -f $ERR_FILE + +# Benchmark each pallet. +for PALLET in "${PALLETS[@]}"; do + echo "[+] Benchmarking $PALLET for $runtime"; + + output_file="" + if [[ $PALLET == *"::"* ]]; then + # translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + output_file="${PALLET//::/_}.rs" + fi + + OUTPUT=$( + $POLKADOT_BIN benchmark pallet \ + --chain="${runtime}-dev" \ + --steps=50 \ + --repeat=20 \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --pallet="$PALLET" \ + --extrinsic="*" \ + --execution=wasm \ + --wasm-execution=compiled \ + --header="$output_path/file_header.txt" \ + --output="$output_path/runtime/${runtime}/src/weights/${output_file}" 2>&1 + ) + if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi +done + +# Check if the error file exists. +if [ -f "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-all-substrate.sh b/scripts/lib/bench-all-substrate.sh new file mode 100644 index 0000000000000000000000000000000000000000..eeb18cdd8bbb31ee58daa93b35e45cf2a1f33e86 --- /dev/null +++ b/scripts/lib/bench-all-substrate.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# This file is part of Substrate. +# Copyright (C) 2022 Parity Technologies (UK) Ltd. +# SPDX-License-Identifier: Apache-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script has three parts which all use the Substrate runtime: +# - Pallet benchmarking to update the pallet weights +# - Overhead benchmarking for the Extrinsic and Block weights +# - Machine benchmarking +# +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/substrate/pull/5848 + +# Original source: https://github.com/paritytech/substrate/blob/ff9921a260a67e3a71f25c8b402cd5c7da787a96/scripts/run_all_benchmarks.sh +# Fail if any sub-command in a pipe fails, not just the last one. +set -o pipefail +# Fail on undeclared variables. +set -u +# Fail if any sub-command fails. +set -e +# Fail on traps. 
+# set -E + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling Substrate benchmarks..." +cargo build --profile=$profile --locked --features=runtime-benchmarks -p staging-node-cli + +# The executable to use. +SUBSTRATE="./target/$profile/substrate-node" + +# Manually exclude some pallets. +EXCLUDED_PALLETS=( + # Helper pallets + "pallet_election_provider_support_benchmarking" + # Pallets without automatic benchmarking + "pallet_babe" + "pallet_grandpa" + "pallet_mmr" + "pallet_offences" + # Only used for testing, does not need real weights. + "frame_benchmarking_pallet_pov" + "pallet_example_tasks" + "pallet_example_basic" + "pallet_example_split" + "pallet_example_kitchensink" + "pallet_example_mbm" + "tasks_example" +) + +# Load all pallet names in an array. +ALL_PALLETS=($( + $SUBSTRATE benchmark pallet --list --chain=dev |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" + +# Delete the error file before each run. +rm -f "$ERR_FILE" + +mkdir -p "$(dirname "$ERR_FILE")" + +# Update the block and extrinsic overhead weights. +echo "[+] Benchmarking block and extrinsic overheads..." +OUTPUT=$( + $SUBSTRATE benchmark overhead \ + --chain=dev \ + --wasm-execution=compiled \ + --weight-path="$output_path/frame/support/src/weights/" \ + --header="$output_path/HEADER-APACHE2" \ + --warmup=10 \ + --repeat=100 2>&1 +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + +echo "[+] Benchmarking ${#ALL_PALLETS[@]} Substrate pallets and excluding ${#EXCLUDED_PALLETS[@]}." + +echo "[+] Excluded pallets ${EXCLUDED_PALLETS[@]}" +echo "[+] ------ " +echo "[+] Whole list pallets ${ALL_PALLETS[@]}" + +# Benchmark each pallet. +for PALLET in "${ALL_PALLETS[@]}"; do + FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; + WEIGHT_FILE="$output_path/frame/${FOLDER}/src/weights.rs" + + # Skip the pallet if it is in the excluded list. + + if [[ " ${EXCLUDED_PALLETS[@]} " =~ " ${PALLET} " ]]; then + echo "[+] Skipping $PALLET as it is in the excluded list." + continue + fi + + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; + + set +e # Disable exit on error for the benchmarking of the pallets + OUTPUT=$( + $SUBSTRATE benchmark pallet \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet="$PALLET" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic="*" \ + --wasm-execution=compiled \ + --heap-pages=4096 \ + --output="$WEIGHT_FILE" \ + --header="$output_path/HEADER-APACHE2" \ + --template="$output_path/.maintain/frame-weight-template.hbs" 2>&1 + ) + if [ $? -ne 0 ]; then + echo -e "$PALLET: $OUTPUT\n" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi + set -e # Re-enable exit on error +done + + +# Check if the error file exists. +if [ -s "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" + exit 1 +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-overhead.sh b/scripts/lib/bench-overhead.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4cca8b4c128ca201adebb1e353ac11ad30b0825 --- /dev/null +++ b/scripts/lib/bench-overhead.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. 
"$THIS_DIR/../command-utils.sh" + +bench_overhead_common_args=( + -- + benchmark + overhead + --wasm-execution=compiled + --warmup=10 + --repeat=100 +) +bench_overhead() { + local args + case "$target_dir" in + substrate) + args=( + --bin=substrate + "${bench_overhead_common_args[@]}" + --header="$output_path/HEADER-APACHE2" + --weight-path="$output_path/frame/support/src/weights" + --chain="dev" + ) + ;; + polkadot) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + --bin=polkadot + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/runtime/$runtime/constants/src/weights" + --chain="$runtime-dev" + ) + ;; + cumulus) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + -p=polkadot-parachain-bin + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/parachains/runtimes/assets/$runtime/src/weights" + --chain="$runtime" + ) + ;; + trappist) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + "${bench_overhead_common_args[@]}" + --header="$output_path/templates/file_header.txt" + --weight-path="$output_path/runtime/$runtime/src/weights" + --chain="$runtime-dev" + ) + ;; + *) + die "Target Dir \"$target_dir\" is not supported in bench_overhead" + ;; + esac + + cargo_run "${args[@]}" +} + +bench_overhead "$@" diff --git a/scripts/lib/bench-pallet.sh b/scripts/lib/bench-pallet.sh new file mode 100644 index 0000000000000000000000000000000000000000..15eac31e3a45cbb3c4f7c2dde3a1006164259f3a --- /dev/null +++ b/scripts/lib/bench-pallet.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +bench_pallet_common_args=( + -- + benchmark + pallet + --steps=50 + --repeat=20 + --extrinsic="*" + --wasm-execution=compiled + --heap-pages=4096 + --json-file="${ARTIFACTS_DIR}/bench.json" +) +bench_pallet() { + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + get_arg required --runtime "$@" + local runtime="${out:-""}" + + get_arg required --pallet "$@" + local pallet="${out:-""}" + + local args + case "$target_dir" in + substrate) + args=( + --features=runtime-benchmarks + --manifest-path="$output_path/bin/node/cli/Cargo.toml" + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="$runtime" + ) + + case "$subcommand" in + pallet) + # Translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + local output_dir="${pallet//::/_}" + + # Substrate benchmarks are output to the "frame" directory but they aren't + # named exactly after the $pallet argument. 
For example: + # - When $pallet == pallet_balances, the output folder is frame/balances + # - When $pallet == frame_benchmarking, the output folder is frame/benchmarking + # The common pattern we infer from those examples is that we should remove + # the prefix + if [[ "$output_dir" =~ ^[A-Za-z]*[^A-Za-z](.*)$ ]]; then + output_dir="${BASH_REMATCH[1]}" + fi + + # We also need to translate '_' to '-' due to the folders' naming + # conventions + output_dir="${output_dir//_/-}" + + args+=( + --header="$output_path/HEADER-APACHE2" + --output="$output_path/frame/$output_dir/src/weights.rs" + --template="$output_path/.maintain/frame-weight-template.hbs" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + polkadot) + # For backward compatibility: replace "-dev" with "" + runtime=${runtime/-dev/} + + local weights_dir="$output_path/runtime/${runtime}/src/weights" + + args=( + --bin=polkadot + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + ) + + case "$subcommand" in + pallet) + args+=( + --header="$output_path/file_header.txt" + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --header="$output_path/file_header.txt" + --template="$output_path/xcm/pallet-xcm-benchmarks/template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + cumulus) + get_arg required --runtime_dir "$@" + local runtime_dir="${out:-""}" + local chain="$runtime" + + # to support specifying parachain id from runtime name (e.g. ["glutton-westend", "glutton-westend-dev-1300"]) + # If runtime ends with "-dev" or "-dev-\d+", leave as it is, otherwise concat "-dev" at the end of $chain + if [[ ! 
"$runtime" =~ -dev(-[0-9]+)?$ ]]; then + chain="${runtime}-dev" + fi + + # replace "-dev" or "-dev-\d+" with "" for runtime + runtime=$(echo "$runtime" | sed 's/-dev.*//g') + + args=( + -p=polkadot-parachain-bin + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${chain}" + --header="$output_path/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/" + ) + ;; + xcm) + mkdir -p "$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm" + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + trappist) + local weights_dir="$output_path/runtime/$runtime/src/weights" + + args=( + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + --header="$output_path/templates/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + *) + die "Repository $target_dir is not supported in bench_pallet" + ;; + esac + + cargo_run "${args[@]}" +} + +bench_pallet "$@" diff --git a/scripts/sync.sh b/scripts/sync.sh new file mode 100755 index 0000000000000000000000000000000000000000..b5d8a521993717d23469ddce4e1399bdd265220f --- /dev/null +++ b/scripts/sync.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + + +# Function to check syncing status +check_syncing() { + # Send the system_health request and parse the isSyncing field + RESPONSE=$(curl -sSX POST http://127.0.0.1:9944 \ + --header 'Content-Type: application/json' \ + --data-raw '{"jsonrpc": "2.0", "method": "system_health", "params": [], "id": "1"}') + + # Check for errors in the curl command + if [ $? -ne 0 ]; then + echo "Error: Unable to send request to Polkadot node" + fi + + IS_SYNCING=$(echo $RESPONSE | jq -r '.result.isSyncing') + + # Check for errors in the jq command or missing field in the response + if [ $? -ne 0 ] || [ "$IS_SYNCING" == "null" ]; then + echo "Error: Unable to parse sync status from response" + fi + + # Return the isSyncing value + echo $IS_SYNCING +} + +main() { + get_arg required --chain "$@" + local chain="${out:-""}" + + get_arg required --type "$@" + local type="${out:-""}" + + export RUST_LOG="${RUST_LOG:-remote-ext=debug,runtime=trace}" + + cargo build --release + + cp "./target/release/polkadot" ./polkadot-bin + + # Start sync. + # "&" runs the process in the background + # "> /dev/tty" redirects the output of the process to the terminal + ./polkadot-bin --sync="$type" --chain="$chain" > "$ARTIFACTS_DIR/sync.log" 2>&1 & + + # Get the PID of process + POLKADOT_SYNC_PID=$! + + sleep 10 + + # Poll the node every 100 seconds until syncing is complete + while :; do + SYNC_STATUS="$(check_syncing)" + if [ "$SYNC_STATUS" == "true" ]; then + echo "Node is still syncing..." + sleep 100 + elif [ "$SYNC_STATUS" == "false" ]; then + echo "Node sync is complete!" 
+ kill "$POLKADOT_SYNC_PID" # Stop the Polkadot node process once syncing is complete + exit 0 # Success + elif [[ "$SYNC_STATUS" = Error:* ]]; then + echo "$SYNC_STATUS" + exit 1 # Error + else + echo "Unknown error: $SYNC_STATUS" + exit 1 # Unknown error + fi + done +} + +main "$@" diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index b756f3504655bf44f0b5e51699026dcd82afaca3..6b061955184ea3ef982352011733616a9bb0520a 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -15,33 +15,33 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -array-bytes = "6.2.2" -clap = { version = "4.5.3", features = ["derive"] } +array-bytes = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -node-primitives = { path = "../primitives" } -node-testing = { path = "../testing" } -kitchensink-runtime = { path = "../runtime" } -sc-client-api = { path = "../../../client/api" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +node-primitives = { workspace = true, default-features = true } +node-testing = { workspace = true } +kitchensink-runtime = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -derive_more = { version = "0.99.17", default-features = false, features = ["display"] } -kvdb = "0.13.0" -kvdb-rocksdb = "0.19.0" -sp-trie = { path = "../../../primitives/trie" } -sp-core = { path = "../../../primitives/core" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } -hash-db = "0.16.0" -tempfile = "3.1.0" -fs_extra = "1" -rand = { version = "0.8.5", features = ["small_rng"] } -lazy_static = "1.4.0" -parity-db = "0.4.12" -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -futures = { version = "0.3.30", features = ["thread-pool"] } +derive_more = { features = ["display"], workspace = true } +kvdb = { workspace = true } +kvdb-rocksdb = { workspace = true } +sp-trie = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-timestamp = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +hash-db = { workspace = true, default-features = true } +tempfile = { workspace = true } +fs_extra = { workspace = true } +rand = { features = ["small_rng"], workspace = true, default-features = true } +lazy_static = { workspace = true } +parity-db = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, 
default-features = true } +futures = { features = ["thread-pool"], workspace = true } diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs index 78b280076e0bd625f2c6fa76beb76a9439a7187a..e340869dea0281092b41c94d107e65e4831c7e95 100644 --- a/substrate/bin/node/bench/src/import.rs +++ b/substrate/bin/node/bench/src/import.rs @@ -122,7 +122,8 @@ impl core::Benchmark for ImportBenchmark { match self.block_type { BlockType::RandomTransfersKeepAlive => { // should be 8 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned (timestamp and glutton bloat) while the rest are + // signed in the block. // those 8 events per signed are: // - transaction paid for the transaction payment // - withdraw (Balances::Withdraw) for charging the transaction fee @@ -135,18 +136,18 @@ impl core::Benchmark for ImportBenchmark { // - extrinsic success assert_eq!( kitchensink_runtime::System::events().len(), - (self.block.extrinsics.len() - 1) * 8 + 1, + (self.block.extrinsics.len() - 2) * 8 + 2, ); }, BlockType::Noop => { assert_eq!( kitchensink_runtime::System::events().len(), // should be 2 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned and the rest are signed in the block // those 2 events per signed are: // - deposit event for charging transaction fee // - extrinsic success - (self.block.extrinsics.len() - 1) * 2 + 1, + (self.block.extrinsics.len() - 2) * 2 + 2, ); }, _ => {}, diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 929cd6a29e3889dbd93d6e31277406cf61176114..ab665f0792a46814343bb39f7497ae925bbe8e68 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -37,53 +37,53 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -array-bytes = "6.1" -clap = { version = "4.5.3", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +array-bytes = { workspace = true, default-features = true } +clap = { features = ["derive"], optional = true, workspace = true } +codec = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.30" +jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -rand = "0.8" +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } # The Polkadot-SDK: -polkadot-sdk = { path = "../../../../umbrella", features = ["node"] } +polkadot-sdk = { features = ["node"], workspace = true, default-features = true } # Shared code between the staging node and kitchensink runtime: -kitchensink-runtime = { path = "../runtime" } -node-rpc = { path = "../rpc" } -node-primitives = { path = "../primitives" } -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +kitchensink-runtime = { workspace = true } +node-rpc = { workspace = true } +node-primitives = { workspace = true, default-features = true } +node-inspect = { optional = true, workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.30" -tempfile = "3.1.0" -assert_cmd = "2.0.2" -nix = { version = "0.28.0", features = ["signal"] } -regex = "1.6.0" -platforms = "3.0" -soketto = "0.7.1" -criterion = 
{ version = "0.5.1", features = ["async_tokio"] } -tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } -tokio-util = { version = "0.7.4", features = ["compat"] } -wait-timeout = "0.2" -wat = "1.0" +futures = { workspace = true } +tempfile = { workspace = true } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +regex = { workspace = true } +platforms = { workspace = true } +soketto = { workspace = true } +criterion = { features = ["async_tokio"], workspace = true, default-features = true } +tokio = { features = ["macros", "parking_lot", "time"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } +wait-timeout = { workspace = true } +wat = { workspace = true } serde_json = { workspace = true, default-features = true } -scale-info = { version = "2.11.1", features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } # These testing-only dependencies are not exported by the Polkadot-SDK crate: -node-testing = { path = "../testing" } -substrate-cli-test-utils = { path = "../../../test-utils/cli" } -sc-service-test = { path = "../../../client/service/test" } +node-testing = { workspace = true } +substrate-cli-test-utils = { workspace = true } +sc-service-test = { workspace = true } [build-dependencies] -clap = { version = "4.5.3", optional = true } -clap_complete = { version = "4.0.2", optional = true } +clap = { optional = true, workspace = true } +clap_complete = { optional = true, workspace = true } -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +node-inspect = { optional = true, workspace = true, default-features = true } -polkadot-sdk = { path = "../../../../umbrella", features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true } +polkadot-sdk = { features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true, workspace = true, default-features = true } [features] default = ["cli"] diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index e21fbb47da8c4619e0923c85bf3470828cd80b23..b63e5ff549ef9d0a5742e26fe03a6167821cc97e 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -16,6 +16,7 @@ "balances": { "balances": [] }, + "broker": {}, "transactionPayment": { "multiplier": "1000000000000000000" }, @@ -74,17 +75,20 @@ "glutton": { "compute": "0", "storage": "0", + "blockLength": "0", "trashDataCount": 0 }, "assets": { "assets": [], "metadata": [], - "accounts": [] + "accounts": [], + "nextAssetId": null }, "poolAssets": { "assets": [], "metadata": [], - "accounts": [] + "accounts": [], + "nextAssetId": null }, "transactionStorage": { "byteFee": 10, diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 5e4488903bf45fa32dd1f219bf9cf5766522e5bd..68769ffb4fa44794d98bfd56657d8cb7219f8a7c 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } +clap = { features = ["derive"], workspace = true } +codec = { workspace 
= true, default-features = true } thiserror = { workspace = true } -sc-cli = { path = "../../../client/cli" } -sc-client-api = { path = "../../../client/api" } -sc-service = { path = "../../../client/service", default-features = false } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } +sc-cli = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-service = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/substrate/bin/node/primitives/Cargo.toml b/substrate/bin/node/primitives/Cargo.toml index 24279ad09c3d9f4576a212d7c67ac24be27b8e22..de295fd59d45a73fee8736259ed91038dd2d40aa 100644 --- a/substrate/bin/node/primitives/Cargo.toml +++ b/substrate/bin/node/primitives/Cargo.toml @@ -16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 6ae80eb578596490753d903d253c01af2660ef4f..fa1e96e67e98250a1c3da6b6298307f00cb85acb 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -16,33 +16,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["server"] } -node-primitives = { path = "../primitives" } -pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc" } -mmr-rpc = { path = "../../../client/merkle-mountain-range/rpc" } -sc-chain-spec = { path = "../../../client/chain-spec" } -sc-client-api = { path = "../../../client/api" } -sc-consensus-babe = { path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { path = "../../../client/consensus/babe/rpc" } -sc-consensus-beefy = { path = "../../../client/consensus/beefy" } -sc-consensus-beefy-rpc = { path = "../../../client/consensus/beefy/rpc" } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } -sc-consensus-grandpa-rpc = { path = "../../../client/consensus/grandpa/rpc" } -sc-mixnet = { path = "../../../client/mixnet" } -sc-rpc = { path = "../../../client/rpc" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sc-rpc-spec-v2 = { path = "../../../client/rpc-spec-v2" } -sc-sync-state-rpc = { path = "../../../client/sync-state-rpc" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { 
path = "../../../primitives/runtime" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-statement-store = { path = "../../../primitives/statement-store" } -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -substrate-state-trie-migration-rpc = { path = "../../../utils/frame/rpc/state-trie-migration-rpc" } +jsonrpsee = { features = ["server"], workspace = true } +node-primitives = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-babe-rpc = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-beefy-rpc = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index e8cc7b3482b66ef8e850871670430b23f2923396..c1c470f1dcd6dfc7bf755d25609c68d6cb18fa03 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -19,29 +19,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -static_assertions = "1.1.0" +], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +static_assertions = { workspace = true, default-features = true } log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } # pallet-asset-conversion: turn on "num-traits" feature -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } -polkadot-sdk = 
{ path = "../../../../umbrella", features = ["runtime", "tuples-96"], default-features = false } +polkadot-sdk = { features = ["runtime", "tuples-96"], workspace = true } # shared code between runtime and node -node-primitives = { path = "../primitives", default-features = false } +node-primitives = { workspace = true } # Example pallets that are not published: -pallet-example-mbm = { path = "../../../frame/examples/multi-block-migrations", default-features = false } -pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } +pallet-example-mbm = { workspace = true } +pallet-example-tasks = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 5ef490b4d6a18821208f1e149fd3d1bacf2b92f6..bb9240ea4bd87be22b5908f09a7491b9adf14628 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -49,7 +49,7 @@ use frame_support::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, - OnUnbalanced, WithdrawReasons, + OnUnbalanced, VariantCountOf, WithdrawReasons, }, weights::{ constants::{ @@ -542,7 +542,7 @@ impl pallet_balances::Config for Runtime { type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -1038,6 +1038,7 @@ impl pallet_ranked_collective::Config for Runtime { type MinRankOfClass = traits::Identity; type VoteWeight = pallet_ranked_collective::Geometric; type MemberSwappedHandler = (CoreFellowship, Salary); + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (CoreFellowship, Salary); } @@ -1212,8 +1213,6 @@ impl pallet_membership::Config for Runtime { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 1 * DOLLARS; pub const SpendPeriod: BlockNumber = 1 * DAYS; pub const Burn: Permill = Permill::from_percent(50); pub const TipCountdown: BlockNumber = 1 * DAYS; @@ -1230,19 +1229,11 @@ parameter_types! { impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, - >; type RejectOrigin = EitherOfDiverse< EnsureRoot, pallet_collective::EnsureProportionMoreThan, >; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = (); type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = (); @@ -1296,6 +1287,7 @@ impl pallet_bounties::Config for Runtime { type MaximumReasonLength = MaximumReasonLength; type WeightInfo = pallet_bounties::weights::SubstrateWeight; type ChildBountyManager = ChildBounties; + type OnSlash = Treasury; } parameter_types! 
{ @@ -1340,6 +1332,7 @@ impl pallet_tips::Config for Runtime { type TipReportDepositBase = TipReportDepositBase; type MaxTipAmount = ConstU128<{ 500 * DOLLARS }>; type WeightInfo = pallet_tips::weights::SubstrateWeight; + type OnSlash = Treasury; } parameter_types! { @@ -1876,6 +1869,7 @@ impl pallet_core_fellowship::Config for Runtime { type InductOrigin = pallet_core_fellowship::EnsureInducted; type ApproveOrigin = EnsureRootWithSuccess>; type PromoteOrigin = EnsureRootWithSuccess>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<16_384>; type MaxRank = ConstU32<9>; } @@ -2105,10 +2099,6 @@ impl OnUnbalanced> for IntoAuthor { } } -parameter_types! { - pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; -} - pub struct CoretimeProvider; impl CoretimeInterface for CoretimeProvider { type AccountId = AccountId; @@ -2124,15 +2114,6 @@ impl CoretimeInterface for CoretimeProvider { _end_hint: Option, ) { } - fn check_notify_revenue_info() -> Option<(u32, Self::Balance)> { - let revenue = CoretimeRevenue::get(); - CoretimeRevenue::set(&None); - revenue - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: u32, revenue: Self::Balance) { - CoretimeRevenue::set(&Some((when, revenue))); - } } impl pallet_broker::Config for Runtime { @@ -2250,248 +2231,248 @@ mod runtime { pub struct Runtime; #[runtime::pallet_index(0)] - pub type System = frame_system; + pub type System = frame_system::Pallet; #[runtime::pallet_index(1)] - pub type Utility = pallet_utility; + pub type Utility = pallet_utility::Pallet; #[runtime::pallet_index(2)] - pub type Babe = pallet_babe; + pub type Babe = pallet_babe::Pallet; #[runtime::pallet_index(3)] - pub type Timestamp = pallet_timestamp; + pub type Timestamp = pallet_timestamp::Pallet; // Authorship must be before session in order to note author in the correct session and era // for im-online and staking. 
#[runtime::pallet_index(4)] - pub type Authorship = pallet_authorship; + pub type Authorship = pallet_authorship::Pallet; #[runtime::pallet_index(5)] - pub type Indices = pallet_indices; + pub type Indices = pallet_indices::Pallet; #[runtime::pallet_index(6)] - pub type Balances = pallet_balances; + pub type Balances = pallet_balances::Pallet; #[runtime::pallet_index(7)] - pub type TransactionPayment = pallet_transaction_payment; + pub type TransactionPayment = pallet_transaction_payment::Pallet; #[runtime::pallet_index(8)] - pub type AssetTxPayment = pallet_asset_tx_payment; + pub type AssetTxPayment = pallet_asset_tx_payment::Pallet; #[runtime::pallet_index(9)] - pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment; + pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment::Pallet; #[runtime::pallet_index(10)] - pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase::Pallet; #[runtime::pallet_index(11)] - pub type Staking = pallet_staking; + pub type Staking = pallet_staking::Pallet; #[runtime::pallet_index(12)] - pub type Session = pallet_session; + pub type Session = pallet_session::Pallet; #[runtime::pallet_index(13)] - pub type Democracy = pallet_democracy; + pub type Democracy = pallet_democracy::Pallet; #[runtime::pallet_index(14)] - pub type Council = pallet_collective; + pub type Council = pallet_collective::Pallet; #[runtime::pallet_index(15)] - pub type TechnicalCommittee = pallet_collective; + pub type TechnicalCommittee = pallet_collective::Pallet; #[runtime::pallet_index(16)] - pub type Elections = pallet_elections_phragmen; + pub type Elections = pallet_elections_phragmen::Pallet; #[runtime::pallet_index(17)] - pub type TechnicalMembership = pallet_membership; + pub type TechnicalMembership = pallet_membership::Pallet; #[runtime::pallet_index(18)] - pub type Grandpa = pallet_grandpa; + pub type Grandpa = pallet_grandpa::Pallet; #[runtime::pallet_index(19)] - pub type Treasury = pallet_treasury; + pub type Treasury = pallet_treasury::Pallet; #[runtime::pallet_index(20)] - pub type AssetRate = pallet_asset_rate; + pub type AssetRate = pallet_asset_rate::Pallet; #[runtime::pallet_index(21)] - pub type Contracts = pallet_contracts; + pub type Contracts = pallet_contracts::Pallet; #[runtime::pallet_index(22)] - pub type Sudo = pallet_sudo; + pub type Sudo = pallet_sudo::Pallet; #[runtime::pallet_index(23)] - pub type ImOnline = pallet_im_online; + pub type ImOnline = pallet_im_online::Pallet; #[runtime::pallet_index(24)] - pub type AuthorityDiscovery = pallet_authority_discovery; + pub type AuthorityDiscovery = pallet_authority_discovery::Pallet; #[runtime::pallet_index(25)] - pub type Offences = pallet_offences; + pub type Offences = pallet_offences::Pallet; #[runtime::pallet_index(26)] - pub type Historical = pallet_session_historical; + pub type Historical = pallet_session_historical::Pallet; #[runtime::pallet_index(27)] - pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip; + pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip::Pallet; #[runtime::pallet_index(28)] - pub type Identity = pallet_identity; + pub type Identity = pallet_identity::Pallet; #[runtime::pallet_index(29)] - pub type Society = pallet_society; + pub type Society = pallet_society::Pallet; #[runtime::pallet_index(30)] - pub type Recovery = pallet_recovery; + pub type Recovery = pallet_recovery::Pallet; 
#[runtime::pallet_index(31)] - pub type Vesting = pallet_vesting; + pub type Vesting = pallet_vesting::Pallet; #[runtime::pallet_index(32)] - pub type Scheduler = pallet_scheduler; + pub type Scheduler = pallet_scheduler::Pallet; #[runtime::pallet_index(33)] - pub type Glutton = pallet_glutton; + pub type Glutton = pallet_glutton::Pallet; #[runtime::pallet_index(34)] - pub type Preimage = pallet_preimage; + pub type Preimage = pallet_preimage::Pallet; #[runtime::pallet_index(35)] - pub type Proxy = pallet_proxy; + pub type Proxy = pallet_proxy::Pallet; #[runtime::pallet_index(36)] - pub type Multisig = pallet_multisig; + pub type Multisig = pallet_multisig::Pallet; #[runtime::pallet_index(37)] - pub type Bounties = pallet_bounties; + pub type Bounties = pallet_bounties::Pallet; #[runtime::pallet_index(38)] - pub type Tips = pallet_tips; + pub type Tips = pallet_tips::Pallet; #[runtime::pallet_index(39)] - pub type Assets = pallet_assets; + pub type Assets = pallet_assets::Pallet; #[runtime::pallet_index(40)] - pub type PoolAssets = pallet_assets; + pub type PoolAssets = pallet_assets::Pallet; #[runtime::pallet_index(41)] - pub type Beefy = pallet_beefy; + pub type Beefy = pallet_beefy::Pallet; // MMR leaf construction must be after session in order to have a leaf's next_auth_set // refer to block. See issue polkadot-fellows/runtimes#160 for details. #[runtime::pallet_index(42)] - pub type Mmr = pallet_mmr; + pub type Mmr = pallet_mmr::Pallet; #[runtime::pallet_index(43)] - pub type MmrLeaf = pallet_beefy_mmr; + pub type MmrLeaf = pallet_beefy_mmr::Pallet; #[runtime::pallet_index(44)] - pub type Lottery = pallet_lottery; + pub type Lottery = pallet_lottery::Pallet; #[runtime::pallet_index(45)] - pub type Nis = pallet_nis; + pub type Nis = pallet_nis::Pallet; #[runtime::pallet_index(46)] - pub type Uniques = pallet_uniques; + pub type Uniques = pallet_uniques::Pallet; #[runtime::pallet_index(47)] - pub type Nfts = pallet_nfts; + pub type Nfts = pallet_nfts::Pallet; #[runtime::pallet_index(48)] - pub type NftFractionalization = pallet_nft_fractionalization; + pub type NftFractionalization = pallet_nft_fractionalization::Pallet; #[runtime::pallet_index(49)] - pub type Salary = pallet_salary; + pub type Salary = pallet_salary::Pallet; #[runtime::pallet_index(50)] - pub type CoreFellowship = pallet_core_fellowship; + pub type CoreFellowship = pallet_core_fellowship::Pallet; #[runtime::pallet_index(51)] - pub type TransactionStorage = pallet_transaction_storage; + pub type TransactionStorage = pallet_transaction_storage::Pallet; #[runtime::pallet_index(52)] - pub type VoterList = pallet_bags_list; + pub type VoterList = pallet_bags_list::Pallet; #[runtime::pallet_index(53)] - pub type StateTrieMigration = pallet_state_trie_migration; + pub type StateTrieMigration = pallet_state_trie_migration::Pallet; #[runtime::pallet_index(54)] - pub type ChildBounties = pallet_child_bounties; + pub type ChildBounties = pallet_child_bounties::Pallet; #[runtime::pallet_index(55)] - pub type Referenda = pallet_referenda; + pub type Referenda = pallet_referenda::Pallet; #[runtime::pallet_index(56)] - pub type Remark = pallet_remark; + pub type Remark = pallet_remark::Pallet; #[runtime::pallet_index(57)] - pub type RootTesting = pallet_root_testing; + pub type RootTesting = pallet_root_testing::Pallet; #[runtime::pallet_index(58)] - pub type ConvictionVoting = pallet_conviction_voting; + pub type ConvictionVoting = pallet_conviction_voting::Pallet; #[runtime::pallet_index(59)] - pub type Whitelist = 
pallet_whitelist; + pub type Whitelist = pallet_whitelist::Pallet; #[runtime::pallet_index(60)] - pub type AllianceMotion = pallet_collective; + pub type AllianceMotion = pallet_collective::Pallet; #[runtime::pallet_index(61)] - pub type Alliance = pallet_alliance; + pub type Alliance = pallet_alliance::Pallet; #[runtime::pallet_index(62)] - pub type NominationPools = pallet_nomination_pools; + pub type NominationPools = pallet_nomination_pools::Pallet; #[runtime::pallet_index(63)] - pub type RankedPolls = pallet_referenda; + pub type RankedPolls = pallet_referenda::Pallet; #[runtime::pallet_index(64)] - pub type RankedCollective = pallet_ranked_collective; + pub type RankedCollective = pallet_ranked_collective::Pallet; #[runtime::pallet_index(65)] - pub type AssetConversion = pallet_asset_conversion; + pub type AssetConversion = pallet_asset_conversion::Pallet; #[runtime::pallet_index(66)] - pub type FastUnstake = pallet_fast_unstake; + pub type FastUnstake = pallet_fast_unstake::Pallet; #[runtime::pallet_index(67)] - pub type MessageQueue = pallet_message_queue; + pub type MessageQueue = pallet_message_queue::Pallet; #[runtime::pallet_index(68)] - pub type Pov = frame_benchmarking_pallet_pov; + pub type Pov = frame_benchmarking_pallet_pov::Pallet; #[runtime::pallet_index(69)] - pub type TxPause = pallet_tx_pause; + pub type TxPause = pallet_tx_pause::Pallet; #[runtime::pallet_index(70)] - pub type SafeMode = pallet_safe_mode; + pub type SafeMode = pallet_safe_mode::Pallet; #[runtime::pallet_index(71)] - pub type Statement = pallet_statement; + pub type Statement = pallet_statement::Pallet; #[runtime::pallet_index(72)] - pub type MultiBlockMigrations = pallet_migrations; + pub type MultiBlockMigrations = pallet_migrations::Pallet; #[runtime::pallet_index(73)] - pub type Broker = pallet_broker; + pub type Broker = pallet_broker::Pallet; #[runtime::pallet_index(74)] - pub type TasksExample = pallet_example_tasks; + pub type TasksExample = pallet_example_tasks::Pallet; #[runtime::pallet_index(75)] - pub type Mixnet = pallet_mixnet; + pub type Mixnet = pallet_mixnet::Pallet; #[runtime::pallet_index(76)] - pub type Parameters = pallet_parameters; + pub type Parameters = pallet_parameters::Pallet; #[runtime::pallet_index(77)] - pub type SkipFeelessPayment = pallet_skip_feeless_payment; + pub type SkipFeelessPayment = pallet_skip_feeless_payment::Pallet; #[runtime::pallet_index(78)] - pub type PalletExampleMbms = pallet_example_mbm; + pub type PalletExampleMbms = pallet_example_mbm::Pallet; #[runtime::pallet_index(79)] - pub type AssetConversionMigration = pallet_asset_conversion_ops; + pub type AssetConversionMigration = pallet_asset_conversion_ops::Pallet; } /// The address format for describing accounts. @@ -2569,6 +2550,7 @@ impl pallet_beefy::Config for Runtime { type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = MmrLeaf; + type AncestryHelper = MmrLeaf; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -3053,7 +3035,7 @@ impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -3063,7 +3045,7 @@ impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -3073,7 +3055,7 @@ impl_runtime_apis! 
{ ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 3ba3f07510e006458cf23b246e293af4e288c624..90c9ee0555cf496dcc371b4f697e64855b6d79f0 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -16,36 +16,36 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -fs_extra = "1" -futures = "0.3.30" +codec = { workspace = true, default-features = true } +fs_extra = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -tempfile = "3.1.0" -frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } -frame-system = { path = "../../../frame/system" } -node-cli = { package = "staging-node-cli", path = "../cli" } -node-primitives = { path = "../primitives" } -kitchensink-runtime = { path = "../runtime" } -pallet-asset-conversion = { path = "../../../frame/asset-conversion" } -pallet-assets = { path = "../../../frame/assets" } -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } -sc-block-builder = { path = "../../../client/block-builder" } -sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", features = ["rocksdb"] } -sc-consensus = { path = "../../../client/consensus/common" } -sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", features = ["rocksdb", "test-helpers"] } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-io = { path = "../../../primitives/io" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } -substrate-test-client = { path = "../../../test-utils/client" } +tempfile = { workspace = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +node-cli = { workspace = true } +node-primitives = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { features = ["rocksdb"], 
workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true } +substrate-test-client = { workspace = true } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 88585649acfe015e2e1e7ca6285c1a3565ca6f78..083f2191f3c5a9a9b3b41639ed20808173d3a3a4 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -24,8 +24,8 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -sc-chain-spec = { path = "../../../client/chain-spec", features = ["clap"] } +sc-chain-spec = { features = ["clap"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 18da3c30691bd895c833454cb6cd9f6fda91531d..39fa054b4806d396acd557947a957c6cfdd519f7 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -17,16 +17,19 @@ // along with this program. If not, see . use chain_spec_builder::{ - generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd, ConvertToRawCmd, - DisplayPresetCmd, ListPresetsCmd, UpdateCodeCmd, VerifyCmd, + generate_chain_spec_for_runtime, AddCodeSubstituteCmd, ChainSpecBuilder, ChainSpecBuilderCmd, + ConvertToRawCmd, DisplayPresetCmd, ListPresetsCmd, UpdateCodeCmd, VerifyCmd, }; use clap::Parser; use sc_chain_spec::{ - update_code_in_json_chain_spec, GenericChainSpec, GenesisConfigBuilderRuntimeCaller, + set_code_substitute_in_json_chain_spec, update_code_in_json_chain_spec, GenericChainSpec, + GenesisConfigBuilderRuntimeCaller, }; use staging_chain_spec_builder as chain_spec_builder; use std::fs; +type ChainSpec = GenericChainSpec<(), ()>; + //avoid error message escaping fn main() { match inner_main() { @@ -50,7 +53,7 @@ fn inner_main() -> Result<(), String> { ref input_chain_spec, ref runtime_wasm_path, }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let mut chain_spec_json = serde_json::from_str::(&chain_spec.as_json(false)?) 
@@ -65,8 +68,29 @@ fn inner_main() -> Result<(), String> { .map_err(|e| format!("to pretty failed: {e}"))?; fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, + ChainSpecBuilderCmd::AddCodeSubstitute(AddCodeSubstituteCmd { + ref input_chain_spec, + ref runtime_wasm_path, + block_height, + }) => { + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; + + let mut chain_spec_json = + serde_json::from_str::(&chain_spec.as_json(false)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; + + set_code_substitute_in_json_chain_spec( + &mut chain_spec_json, + &fs::read(runtime_wasm_path.as_path()) + .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], + block_height, + ); + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let chain_spec_json = serde_json::from_str::(&chain_spec.as_json(true)?) @@ -77,7 +101,7 @@ fn inner_main() -> Result<(), String> { fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let _ = serde_json::from_str::(&chain_spec.as_json(true)?) .map_err(|e| format!("Conversion to json failed: {e}"))?; }, diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 0f7c003fc8c2da2709076ee3c6f517d9a2ae8beb..6c679f109a002401bb6b25718979c2e8fc58d2a9 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -125,7 +125,7 @@ use serde_json::Value; /// A utility to easily create a chain spec definition. #[derive(Debug, Parser)] -#[command(rename_all = "kebab-case")] +#[command(rename_all = "kebab-case", version, about)] pub struct ChainSpecBuilder { #[command(subcommand)] pub command: ChainSpecBuilderCmd, @@ -143,6 +143,7 @@ pub enum ChainSpecBuilderCmd { ConvertToRaw(ConvertToRawCmd), ListPresets(ListPresetsCmd), DisplayPreset(DisplayPresetCmd), + AddCodeSubstitute(AddCodeSubstituteCmd), } /// Create a new chain spec by interacting with the provided runtime wasm blob. @@ -222,6 +223,25 @@ pub struct UpdateCodeCmd { pub runtime_wasm_path: PathBuf, } +/// Add a code substitute in the chain spec. +/// +/// The `codeSubstitute` object of the chain spec will be updated with the block height as key and +/// runtime code as value. This operation supports both plain and raw formats. The `codeSubstitute` +/// field instructs the node to use the provided runtime code at the given block height. This is +/// useful when the chain can not progress on its own due to a bug that prevents block-building. +/// +/// Note: For parachains, the validation function on the relaychain needs to be adjusted too, +/// otherwise blocks built using the substituted parachain runtime will be rejected. +#[derive(Parser, Debug, Clone)] +pub struct AddCodeSubstituteCmd { + /// Chain spec to be updated. 
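For reference, the `codeSubstitutes` entry this command produces is just a JSON patch keyed by block height. Below is a minimal sketch of that shape, assuming only the `serde_json` crate; the block height, spec fields, and hex string are placeholders, and the simple top-level merge stands in for the `sc_chain_spec::json_patch::merge` and `sp_core::bytes::to_hex` calls used by the real command.

```rust
// Illustrative sketch: shows the shape of the patch that `add-code-substitute`
// merges into a chain spec. All values are placeholders, not real runtime code.
use serde_json::json;

fn main() {
    let block_height: u64 = 1_000_000; // hypothetical height for the substitute
    let code_hex = "0x0061736d01000000"; // stands in for the hex-encoded wasm blob

    // The substitute is keyed by the block height, encoded as a string.
    let key = block_height.to_string();
    let patch = json!({ "codeSubstitutes": { key: code_hex } });

    // Minimal top-level merge, enough to show the resulting spec layout.
    let mut chain_spec = json!({ "name": "example-chain", "id": "example" });
    if let (Some(spec), Some(extra)) = (chain_spec.as_object_mut(), patch.as_object()) {
        for (field, value) in extra {
            spec.insert(field.clone(), value.clone());
        }
    }

    println!("{}", serde_json::to_string_pretty(&chain_spec).unwrap());
}
```

The printed spec then contains a `codeSubstitutes` object keyed by `"1000000"`, which is the field the command's doc comment describes.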
+ pub input_chain_spec: PathBuf, + /// New runtime wasm blob that should replace the existing code. + pub runtime_wasm_path: PathBuf, + /// The block height at which the code should be substituted. + pub block_height: u64, +} + /// Converts the given chain spec into the raw format. #[derive(Parser, Debug, Clone)] pub struct ConvertToRawCmd { diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml index 8dc4bf254b2d44e1f6b5c96ca16a2e8586e0333c..5aa013097c150add192d25c2b658c61720e39bd8 100644 --- a/substrate/bin/utils/subkey/Cargo.toml +++ b/substrate/bin/utils/subkey/Cargo.toml @@ -20,5 +20,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -sc-cli = { path = "../../../client/cli" } +clap = { features = ["derive"], workspace = true } +sc-cli = { workspace = true, default-features = true } diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index 2c268b548ea9c32fabdc82226c77e82a7ef59cea..5a3b05aa8a98d8366433d574e158a8cd9a2f3a64 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -19,5 +19,5 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-core = { path = "../../primitives/core" } -sp-wasm-interface = { path = "../../primitives/wasm-interface" } +sp-core = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 147ea2bfbf5df83716c4a7f1f5fb2ade0c41d3f8..a64ee3ab4ce1944672ec87aac34119720834ef59 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -17,30 +17,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -fnv = "1.0.6" -futures = "0.3.30" +], workspace = true } +fnv = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-executor = { path = "../executor" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core", default-features = false } -sp-database = { path = "../../primitives/database" } -sp-externalities = { path = "../../primitives/externalities" } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-statement-store = { path = "../../primitives/statement-store" } -sp-storage = { path = "../../primitives/storage" } -sp-trie = { path = "../../primitives/trie" } +parking_lot = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = 
true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-database = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] thiserror = { workspace = true } -sp-test-primitives = { path = "../../primitives/test-primitives" } -substrate-test-runtime = { path = "../../test-utils/runtime" } +sp-test-primitives = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 31b100433c7086719737275bfaa6b3931bffc88a..0b2a349524018d33b8003b6832363286b77d8503 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -217,7 +217,8 @@ pub trait BlockImportOperation { where I: IntoIterator, Option>)>; - /// Mark a block as finalized. + /// Mark a block as finalized, if multiple blocks are finalized in the same operation then they + /// must be marked in ascending order. fn mark_finalized( &mut self, hash: Block::Hash, diff --git a/substrate/client/api/src/leaves.rs b/substrate/client/api/src/leaves.rs index e129de8bf3fad30a0bb76569957c93138c631db8..70efe8b19c627d03741a3ea11b8889332b5a567c 100644 --- a/substrate/client/api/src/leaves.rs +++ b/substrate/client/api/src/leaves.rs @@ -45,33 +45,20 @@ pub struct RemoveOutcome { } /// Removed leaves after a finalization action. -pub struct FinalizationOutcome { - removed: BTreeMap, Vec>, +pub struct FinalizationOutcome +where + I: Iterator, +{ + removed: I, } -impl FinalizationOutcome { - /// Merge with another. This should only be used for displaced items that - /// are produced within one transaction of each other. - pub fn merge(&mut self, mut other: Self) { - // this will ignore keys that are in duplicate, however - // if these are actually produced correctly via the leaf-set within - // one transaction, then there will be no overlap in the keys. - self.removed.append(&mut other.removed); - } - - /// Iterate over all displaced leaves. - pub fn leaves(&self) -> impl Iterator { - self.removed.values().flatten() - } - +impl FinalizationOutcome +where + I: Iterator, +{ /// Constructor - pub fn new(new_displaced: impl Iterator) -> Self { - let mut removed = BTreeMap::, Vec>::new(); - for (hash, number) in new_displaced { - removed.entry(Reverse(number)).or_default().push(hash); - } - - FinalizationOutcome { removed } + pub fn new(new_displaced: I) -> Self { + FinalizationOutcome { removed: new_displaced } } } @@ -86,7 +73,7 @@ pub struct LeafSet { impl LeafSet where H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, + N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode, { /// Construct a new, blank leaf set. 
pub fn new() -> Self { @@ -117,13 +104,13 @@ where let number = Reverse(number); let removed = if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); + let parent_number = Reverse(number.0 - N::one()); self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash) } else { None }; - self.insert_leaf(number.clone(), hash.clone()); + self.insert_leaf(number, hash.clone()); ImportOutcome { inserted: LeafSetItem { hash, number }, removed } } @@ -150,7 +137,7 @@ where let inserted = parent_hash.and_then(|parent_hash| { if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); + let parent_number = Reverse(number.0 - N::one()); self.insert_leaf(parent_number, parent_hash.clone()); Some(parent_hash) } else { @@ -162,11 +149,12 @@ where } /// Remove all leaves displaced by the last block finalization. - pub fn remove_displaced_leaves(&mut self, displaced_leaves: &FinalizationOutcome) { - for (number, hashes) in &displaced_leaves.removed { - for hash in hashes.iter() { - self.remove_leaf(number, hash); - } + pub fn remove_displaced_leaves(&mut self, displaced_leaves: FinalizationOutcome) + where + I: Iterator, + { + for (number, hash) in displaced_leaves.removed { + self.remove_leaf(&Reverse(number), &hash); } } @@ -186,13 +174,13 @@ where let items = self .storage .iter() - .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) + .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), *number))) .collect::>(); - for (hash, number) in &items { + for (hash, number) in items { if number.0 > best_number { assert!( - self.remove_leaf(number, hash), + self.remove_leaf(&number, &hash), "item comes from an iterator over storage; qed", ); } @@ -207,7 +195,7 @@ where // we need to make sure that the best block exists in the leaf set as // this is an invariant of regular block import. if !leaves_contains_best { - self.insert_leaf(best_number.clone(), best_hash.clone()); + self.insert_leaf(best_number, best_hash.clone()); } } @@ -229,7 +217,7 @@ where column: u32, prefix: &[u8], ) { - let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); + let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0, h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); } @@ -274,7 +262,7 @@ where /// Returns the highest leaf and all hashes associated to it. pub fn highest_leaf(&self) -> Option<(N, &[H])> { - self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) + self.storage.iter().next().map(|(k, v)| (k.0, &v[..])) } } @@ -286,13 +274,13 @@ pub struct Undo<'a, H: 'a, N: 'a> { impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, + N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode, { /// Undo an imported block by providing the import operation outcome. /// No additional operations should be performed between import and undo. pub fn undo_import(&mut self, outcome: ImportOutcome) { if let Some(removed_hash) = outcome.removed { - let removed_number = Reverse(outcome.inserted.number.0.clone() - N::one()); + let removed_number = Reverse(outcome.inserted.number.0 - N::one()); self.inner.insert_leaf(removed_number, removed_hash); } self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash); @@ -302,7 +290,7 @@ where /// No additional operations should be performed between remove and undo. 
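The `FinalizationOutcome`/`LeafSet` changes in this file replace the eagerly built `BTreeMap` of displaced leaves with a thin wrapper around the iterator itself, so the leaf set drains it directly. The following is a self-contained sketch of that pattern using plain `u64` numbers and fixed-size hashes instead of the real generic parameters; it illustrates the shape of the refactor, not the actual `sc-client-api` types.

```rust
// Standalone sketch (std only): the outcome wraps the iterator of displaced
// leaves and the leaf set consumes it directly, instead of grouping the
// leaves into a BTreeMap up front. `Hash`/`Number` are simplified stand-ins.
use std::{cmp::Reverse, collections::BTreeMap};

type Hash = [u8; 32];
type Number = u64;

struct FinalizationOutcome<I: Iterator<Item = (Number, Hash)>> {
    removed: I,
}

impl<I: Iterator<Item = (Number, Hash)>> FinalizationOutcome<I> {
    fn new(new_displaced: I) -> Self {
        Self { removed: new_displaced }
    }
}

#[derive(Default)]
struct LeafSet {
    // Highest block numbers first, mirroring the `Reverse<N>` keys.
    storage: BTreeMap<Reverse<Number>, Vec<Hash>>,
}

impl LeafSet {
    fn insert_leaf(&mut self, number: Number, hash: Hash) {
        self.storage.entry(Reverse(number)).or_default().push(hash);
    }

    // Counterpart of `remove_displaced_leaves`: drain the iterator lazily.
    fn remove_displaced_leaves<I: Iterator<Item = (Number, Hash)>>(
        &mut self,
        displaced: FinalizationOutcome<I>,
    ) {
        for (number, hash) in displaced.removed {
            if let Some(leaves) = self.storage.get_mut(&Reverse(number)) {
                leaves.retain(|h| *h != hash);
                if leaves.is_empty() {
                    self.storage.remove(&Reverse(number));
                }
            }
        }
    }
}

fn main() {
    let mut leaves = LeafSet::default();
    leaves.insert_leaf(10, [1; 32]);
    leaves.insert_leaf(11, [2; 32]);
    // Pretend the leaf at #10 was displaced by finalizing a competing fork.
    leaves.remove_displaced_leaves(FinalizationOutcome::new(vec![(10, [1; 32])].into_iter()));
    assert_eq!(leaves.storage.len(), 1);
}
```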
pub fn undo_remove(&mut self, outcome: RemoveOutcome) { if let Some(inserted_hash) = outcome.inserted { - let inserted_number = Reverse(outcome.removed.number.0.clone() - N::one()); + let inserted_number = Reverse(outcome.removed.number.0 - N::one()); self.inner.remove_leaf(&inserted_number, &inserted_hash); } self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash); @@ -310,8 +298,13 @@ where /// Undo a finalization operation by providing the displaced leaves. /// No additional operations should be performed between finalization and undo. - pub fn undo_finalization(&mut self, mut outcome: FinalizationOutcome) { - self.inner.storage.append(&mut outcome.removed); + pub fn undo_finalization(&mut self, outcome: FinalizationOutcome) + where + I: Iterator, + { + for (number, hash) in outcome.removed { + self.inner.storage.entry(Reverse(number)).or_default().push(hash); + } } } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 435ca88a80079c9da9510b0f505d21677afb52cf..309c9c542a0b10fa8c7fbff14a07c5622634e8f2 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -17,38 +17,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -futures = "0.3.30" -futures-timer = "3.0.1" -ip_network = "0.4.1" -libp2p = { version = "0.51.4", features = ["ed25519", "kad"] } -multihash = { version = "0.17.0", default-features = false, features = ["sha2", "std"] } -linked_hash_set = "0.1.4" +codec = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +ip_network = { workspace = true } +libp2p = { features = ["ed25519", "kad"], workspace = true } +multihash = { workspace = true } +linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } -prost = "0.12.4" -rand = "0.8.5" +prost = { workspace = true } +rand = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-types = { path = "../network/types" } -sp-api = { path = "../../primitives/api" } -sp-authority-discovery = { path = "../../primitives/authority-discovery" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-runtime = { path = "../../primitives/runtime" } -async-trait = "0.1.79" -multihash-codetable = { version = "0.1.1", features = [ - "digest", - "serde", - "sha2", -] } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +async-trait = { workspace = true } [dev-dependencies] 
-quickcheck = { version = "1.0.3", default-features = false } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +quickcheck = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/authority-discovery/src/service.rs b/substrate/client/authority-discovery/src/service.rs index 60c7a2b990378f0c91a16854bdb815de8661c53a..852d3ab80c9b8926d291f5a83249515b1c8fabc6 100644 --- a/substrate/client/authority-discovery/src/service.rs +++ b/substrate/client/authority-discovery/src/service.rs @@ -55,7 +55,7 @@ impl Service { /// [`crate::Worker`] failed. /// /// Note: [`Multiaddr`]s returned always include a [`PeerId`] via a - /// [`libp2p::core::multiaddr::Protocol::P2p`] component. Equality of + /// [`sc_network_types::multiaddr::Protocol::P2p`] component. Equality of /// [`PeerId`]s across [`Multiaddr`]s returned by a single call is not /// enforced today, given that there are still authorities out there /// publishing the addresses of their sentry nodes on the DHT. In the future diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index f20cf6aa2121204063982bc7717bf094ac561c53..1f1cce160786c9ea423fbe764ecaac69232235ca 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -45,10 +45,7 @@ use sc_network::{ event::DhtEvent, multiaddr, KademliaKey, Multiaddr, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, }; -use sc_network_types::{ - multihash::{Code, Multihash}, - PeerId, -}; +use sc_network_types::{multihash::Code, PeerId}; use schema::PeerSignature; use sp_api::{ApiError, ProvideRuntimeApi}; use sp_authority_discovery::{ @@ -247,14 +244,14 @@ where }; let public_addresses = { - let local_peer_id: Multihash = network.local_peer_id().into(); + let local_peer_id = network.local_peer_id(); config .public_addresses .into_iter() .map(|mut address| { if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { - if peer_id != local_peer_id { + if peer_id != *local_peer_id.as_ref() { error!( target: LOG_TARGET, "Discarding invalid local peer ID in public address {address}.", @@ -397,14 +394,13 @@ where debug!( target: LOG_TARGET, - "Authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", + "Publishing authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", ); // The address must include the local peer id. - let local_peer_id: Multihash = local_peer_id.into(); addresses .into_iter() - .map(move |a| a.with(multiaddr::Protocol::P2p(local_peer_id))) + .map(move |a| a.with(multiaddr::Protocol::P2p(*local_peer_id.as_ref()))) } /// Publish own public addresses. 
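Since the authority-discovery worker now builds and validates public addresses against the local peer ID's multihash, a short sketch of that handling may help. It assumes the `sc_network::{multiaddr, Multiaddr}` and `sc_network_types::PeerId` types used in the hunks above and is an illustration of the check, not the worker's actual code: an explicit `/p2p/...` suffix naming a different peer is rejected, and a missing suffix is appended before publishing.

```rust
// Sketch only: mirrors the public-address handling in the worker change above,
// assuming the sc-network / sc-network-types types used there.
use sc_network::{multiaddr, Multiaddr};
use sc_network_types::PeerId;

/// Accept `address` only if any trailing `/p2p/...` component names the local
/// peer; append the component when it is missing, as the publish path does.
fn normalize_public_address(address: Multiaddr, local_peer_id: &PeerId) -> Option<Multiaddr> {
    // `PeerId::as_ref()` yields the underlying multihash, as in the diff.
    let local_hash = *local_peer_id.as_ref();

    // Inspect the trailing component; the borrow ends with this statement.
    let trailing = match address.iter().last() {
        Some(multiaddr::Protocol::P2p(peer_id)) => Some(peer_id),
        _ => None,
    };

    match trailing {
        // An explicit `/p2p/...` suffix for a different peer is discarded.
        Some(peer_id) if peer_id != local_hash => None,
        // The address already carries the local peer id.
        Some(_) => Some(address),
        // No suffix yet: append the local peer id.
        None => Some(address.with(multiaddr::Protocol::P2p(local_hash))),
    }
}
```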
diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs index 77cdfbd4f1502574cd5a1aa2434d3c0fd392fd51..13bb990bf8b9908a5ec9a11e205a4a380c5954b4 100644 --- a/substrate/client/authority-discovery/src/worker/addr_cache.rs +++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs @@ -177,7 +177,7 @@ mod tests { use super::*; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; - use sc_network_types::multihash::Multihash; + use sc_network_types::multihash::{Code, Multihash}; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; @@ -198,10 +198,9 @@ mod tests { impl Arbitrary for TestMultiaddr { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); - let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), - ) - .unwrap(); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &seed).unwrap()) + .unwrap(); let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" .parse::() .unwrap() @@ -217,10 +216,9 @@ mod tests { impl Arbitrary for TestMultiaddrsSamePeerCombo { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); - let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), - ) - .unwrap(); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &seed).unwrap()) + .unwrap(); let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" .parse::() .unwrap() diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index b75cb463b1a874c48a7b8c4511929eade4245704..e3ae80e14f6ffd3f554f59f3debb45a3b0c04431 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-block-builder = { path = "../block-builder" } -sc-proposer-metrics = { path = "../proposer-metrics" } -sc-telemetry = { path = "../telemetry" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-inherents = { path = "../../primitives/inherents" } -sp-runtime = { path = "../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-proposer-metrics = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, 
default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-transaction-pool = { path = "../transaction-pool" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 1519c76c42c0efab1fa59c7e53bed41c5e104f9b..74805488792ad5f1eb810d9c22f00d719f1b1b21 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -205,7 +205,11 @@ where ) -> Proposer { let parent_hash = parent_header.hash(); - info!("๐Ÿ™Œ Starting consensus session on top of parent {:?}", parent_hash); + info!( + "๐Ÿ™Œ Starting consensus session on top of parent {:?} (#{})", + parent_hash, + parent_header.number() + ); let proposer = Proposer::<_, _, _, PR> { spawn_handle: self.spawn_handle.clone(), diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 62efe977e989c13bfc6e3fe0fb11d13ac3aca298..47e3fc39c289997bdcdfa094ce65193295ab0220 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -] } -sp-api = { path = "../../primitives/api" } -sp-block-builder = { path = "../../primitives/block-builder" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-trie = { path = "../../primitives/trie" } -sp-inherents = { path = "../../primitives/inherents" } -sp-runtime = { path = "../../primitives/runtime" } +], workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sp-state-machine = { path = "../../primitives/state-machine" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +sp-state-machine = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 5b411b642a0e3aa410517621d99f4ab3cd245a74..b3cd4bd57db86c0384c56316315550edda962f24 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,31 +16,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -memmap2 = "0.9.3" +clap = { features = ["derive"], optional = true, workspace = true } +codec = { features = ["derive"], 
workspace = true } +memmap2 = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-client-api = { path = "../api" } -sc-chain-spec-derive = { path = "derive" } -sc-executor = { path = "../executor" } -sp-io = { default-features = false, path = "../../primitives/io" } -sc-network = { path = "../network" } -sc-telemetry = { path = "../telemetry" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-genesis-builder = { path = "../../primitives/genesis-builder" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } +sc-client-api = { workspace = true, default-features = true } +sc-chain-spec-derive = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sp-io = { workspace = true } +sc-network = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } log = { workspace = true } -sp-tracing = { path = "../../primitives/tracing" } -array-bytes = "6.2.2" -docify = "0.2.8" +sp-tracing = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +docify = { workspace = true } [dev-dependencies] -substrate-test-runtime = { path = "../../test-utils/runtime" } -sp-keyring = { path = "../../primitives/keyring" } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } -sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } -regex = "1.6.0" +substrate-test-runtime = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +regex = { workspace = true } diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index 521eee578ecae3b03cf86a3b4e3630bb7cd22f02..4ab8c849cc7feb6368c5f74fbbd77703434b7675 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 883cd19adfd1c7543ee919a6e436df1838c6efe2..5f90f549e02262e97c3ae8ea82941838faa5e2f7 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -766,6 +766,16 @@ pub fn update_code_in_json_chain_spec(chain_spec: &mut json::Value, code: &[u8]) } } +/// This function sets a codeSubstitute in the chain spec. 
+pub fn set_code_substitute_in_json_chain_spec( + chain_spec: &mut json::Value, + code: &[u8], + block_height: u64, +) { + let substitutes = json::json!({"codeSubstitutes":{ &block_height.to_string(): sp_core::bytes::to_hex(code, false) }}); + crate::json_patch::merge(chain_spec, substitutes); +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index b59ad68610ecee8fc78ea15eb7cd717fe2bb4d40..c43f9e89b8a993f6a349b3eb612b41346acc62ec 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -333,8 +333,8 @@ pub mod json_patch; pub use self::{ chain_spec::{ - update_code_in_json_chain_spec, ChainSpec as GenericChainSpec, ChainSpecBuilder, - NoExtension, + set_code_substitute_in_json_chain_spec, update_code_in_json_chain_spec, + ChainSpec as GenericChainSpec, ChainSpecBuilder, NoExtension, }, extension::{get_extension, get_extension_mut, Extension, Fork, Forks, GetExtension, Group}, genesis_block::{ diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 169ed72c96e489ee35dca02edc6e098c2426eadb..1e4017c23af231baf0946ed26cc37e7767276cbb 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -chrono = "0.4.31" -clap = { version = "4.5.3", features = ["derive", "string", "wrap_help"] } -fdlimit = "0.3.0" -futures = "0.3.30" -itertools = "0.11" -libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } +array-bytes = { workspace = true, default-features = true } +chrono = { workspace = true } +clap = { features = ["derive", "string", "wrap_help"], workspace = true } +fdlimit = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } +libp2p-identity = { features = ["ed25519", "peerid"], workspace = true } log = { workspace = true, default-features = true } -names = { version = "0.14.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12" } -rand = "0.8.5" -regex = "1.6.0" -rpassword = "7.0.0" +names = { workspace = true } +codec = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +regex = { workspace = true } +rpassword = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] } -tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] } -sc-client-api = { path = "../api" } -sc-client-db = { path = "../db", default-features = false } -sc-keystore = { path = "../keystore" } -sc-mixnet = { path = "../mixnet" } -sc-network = { path = "../network" } -sc-service = { path = "../service", default-features = false } -sc-telemetry = { path = "../telemetry" } -sc-tracing = { path = "../tracing" } -sc-utils = { path = "../utils" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keyring = { path = "../../primitives/keyring" } -sp-keystore = { path = "../../primitives/keystore" } -sp-panic-handler = { path = "../../primitives/panic-handler" } -sp-runtime = { path = "../../primitives/runtime" } -sp-version = { path = 
"../../primitives/version" } +tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-panic-handler = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" -futures-timer = "3.0.1" -sp-tracing = { path = "../../primitives/tracing" } +tempfile = { workspace = true } +futures-timer = { workspace = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["rocksdb"] diff --git a/substrate/client/cli/src/lib.rs b/substrate/client/cli/src/lib.rs index 104e8ec8b798ee5b8eb6c9561c6ef0729d7a7b2a..1bb9fec0e27690f9b7ec21918f3dce329100de3a 100644 --- a/substrate/client/cli/src/lib.rs +++ b/substrate/client/cli/src/lib.rs @@ -58,11 +58,11 @@ pub trait SubstrateCli: Sized { /// Implementation version. /// - /// By default this will look like this: + /// By default, it will look like this: /// /// `2.0.0-b950f731c` /// - /// Where the hash is the short commit hash of the commit of in the Git repository. + /// Where the hash is the short hash of the commit in the Git repository. fn impl_version() -> String; /// Executable file name. @@ -199,17 +199,8 @@ pub trait SubstrateCli: Sized { fn create_runner, DVC: DefaultConfigurationValues>( &self, command: &T, - ) -> error::Result> { - let tokio_runtime = build_runtime()?; - - // `capture` needs to be called in a tokio context. - // Also capture them as early as possible. - let signals = tokio_runtime.block_on(async { Signals::capture() })?; - - let config = command.create_configuration(self, tokio_runtime.handle().clone())?; - - command.init(&Self::support_url(), &Self::impl_version(), |_, _| {}, &config)?; - Runner::new(config, tokio_runtime, signals) + ) -> Result> { + self.create_runner_with_logger_hook(command, |_, _| {}) } /// Create a runner for the command provided in argument. The `logger_hook` can be used to setup @@ -231,11 +222,15 @@ pub trait SubstrateCli: Sized { /// } /// } /// ``` - fn create_runner_with_logger_hook( + fn create_runner_with_logger_hook< + T: CliConfiguration, + DVC: DefaultConfigurationValues, + F, + >( &self, command: &T, logger_hook: F, - ) -> error::Result> + ) -> Result> where F: FnOnce(&mut LoggerBuilder, &Configuration), { diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 25b17b53289818ba02e839e98b9738f380fc1067..6b7b0e7ffa9976e7121b7392fb25f0adaf493c45 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -29,11 +29,18 @@ pub struct PruningParams { /// should be pruned (ie, removed) from the database. 
/// This setting can only be set on the first creation of the database. Every subsequent run /// will load the pruning mode from the database and will error if the stored mode doesn't - /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. + /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. The only + /// exception is that `NUMBER` can change between subsequent runs (increasing it will not + /// lead to restoring pruned state). + /// /// Possible values: - /// - archive: Keep the state of all blocks. - /// - 'archive-canonical' Keep only the state of finalized blocks. - /// - number Keep the state of the last number of finalized blocks. + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. + /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. + /// /// [default: 256] #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, @@ -42,11 +49,14 @@ pub struct PruningParams { /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. + /// /// Possible values: - /// - 'archive' Keep all blocks. - /// - 'archive-canonical' Keep only finalized blocks. - /// - number - /// Keep the last `number` of finalized blocks. + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. + /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. #[arg( alias = "keep-blocks", long, @@ -117,3 +127,39 @@ impl Into for DatabasePruningMode { } } } + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + #[derive(Parser)] + struct Cli { + #[clap(flatten)] + pruning: PruningParams, + } + + #[test] + fn pruning_params_parse_works() { + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=1000", "--blocks-pruning=1000"]); + + assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::Custom(1000)))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Custom(1000))); + + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=archive", "--blocks-pruning=archive"]); + + assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::Archive))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Archive)); + + let Cli { pruning } = Cli::parse_from([ + "", + "--state-pruning=archive-canonical", + "--blocks-pruning=archive-canonical", + ]); + + assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::ArchiveCanonical))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::ArchiveCanonical)); + } +} diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index d1460c45356d7ec86204b52c42c08ee28e9c5faf..3a3d7ae18d711e0c9b952e4f1383ae8ecb28fbc3 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -16,37 +16,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } 
-sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-slots = { path = "../slots" } -sc-telemetry = { path = "../../telemetry" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -tempfile = "3.1.0" -sc-keystore = { path = "../../keystore" } -sc-network = { path = "../../network" } -sc-network-test = { path = "../../network/test" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = { version = "1.22.0" } +parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/aura/src/import_queue.rs b/substrate/client/consensus/aura/src/import_queue.rs index a8777ef8788cc3a247d6d09c146f61bf4cb23e62..79f4faa5ebf97657b0f6d02933a1e7f8421d4f56 100644 --- a/substrate/client/consensus/aura/src/import_queue.rs +++ b/substrate/client/consensus/aura/src/import_queue.rs @@ -174,7 +174,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { // Skip checks that include 
execution, if being told so or when importing only state. diff --git a/substrate/client/consensus/aura/src/standalone.rs b/substrate/client/consensus/aura/src/standalone.rs index 0f9b8668d4478bfe4dedfc56e234b79acdf14674..c1536d9ef73f38561e9944928b1c7a6c1e985ab6 100644 --- a/substrate/client/consensus/aura/src/standalone.rs +++ b/substrate/client/consensus/aura/src/standalone.rs @@ -24,7 +24,7 @@ use log::trace; use codec::Codec; -use sc_client_api::{backend::AuxStore, UsageProvider}; +use sc_client_api::UsageProvider; use sp_api::{Core, ProvideRuntimeApi}; use sp_application_crypto::{AppCrypto, AppPublic}; use sp_blockchain::Result as CResult; @@ -48,7 +48,7 @@ pub fn slot_duration(client: &C) -> CResult where A: Codec, B: BlockT, - C: AuxStore + ProvideRuntimeApi + UsageProvider, + C: ProvideRuntimeApi + UsageProvider, C::Api: AuraApi, { slot_duration_at(client, client.usage_info().chain.best_hash) @@ -59,7 +59,7 @@ pub fn slot_duration_at(client: &C, block_hash: B::Hash) -> CResult, + C: ProvideRuntimeApi, C::Api: AuraApi, { client.runtime_api().slot_duration(block_hash).map_err(|err| err.into()) diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index c51082a018b5cfd558efe0b76985d6add890057d..bba60bc45ea50d5c14b2d6a742de690ea95c98d6 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -17,41 +17,41 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -num-bigint = "0.4.3" -num-rational = "0.4.1" -num-traits = "0.2.17" -parking_lot = "0.12.1" +num-bigint = { workspace = true } +num-rational = { workspace = true } +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-epochs = { path = "../epochs" } -sc-consensus-slots = { path = "../slots" } -sc-telemetry = { path = "../../telemetry" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = 
true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-block-builder = { path = "../../block-builder" } -sp-keyring = { path = "../../../primitives/keyring" } -sc-network-test = { path = "../../network/test" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = "1.37" +sc-block-builder = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-timestamp = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 4c755df541d70315dea241092145e17c2bd28800..1ef049c3dbcc412ae94bc849b066e2ff154ae57c 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -futures = "0.3.30" +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +futures = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-consensus-babe = { path = ".." 
} -sc-consensus-epochs = { path = "../../epochs" } -sc-rpc-api = { path = "../../../rpc-api" } -sp-api = { path = "../../../../primitives/api" } -sp-application-crypto = { path = "../../../../primitives/application-crypto" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-consensus = { path = "../../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../../primitives/consensus/babe" } -sp-core = { path = "../../../../primitives/core" } -sp-keystore = { path = "../../../../primitives/keystore" } -sp-runtime = { path = "../../../../primitives/runtime" } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -tokio = "1.37" -sc-consensus = { path = "../../common" } -sc-keystore = { path = "../../../keystore" } -sc-transaction-pool-api = { path = "../../../transaction-pool/api" } -sp-keyring = { path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 0c85de24004031fce96be35bc413506069093eb1..0c1eb88758644c0d661c25c4feb670c6054781f4 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1128,7 +1128,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { trace!( @@ -1681,7 +1681,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await.map_err(Into::into) diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 716067ae4000661beab6aeb90772087720d0a5ae..6f805188b9a42d806f45c01715f1a088770c3bfe 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -143,11 +143,11 @@ thread_local! 
{ pub struct PanickingBlockImport(B); #[async_trait::async_trait] -impl> BlockImport for PanickingBlockImport +impl BlockImport for PanickingBlockImport where - B: Send, + BI: BlockImport + Send + Sync, { - type Error = B::Error; + type Error = BI::Error; async fn import_block( &mut self, @@ -157,7 +157,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { Ok(self.0.check_block(block).await.expect("checking block failed")) @@ -198,7 +198,7 @@ impl Verifier for TestVerifier { /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { // apply post-sealing mutations (i.e. stripping seal, if desired). diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index f5528ec5931dbc5d77321e903b57e4ef70349afc..b2031e0d1e077ef1d793faf570698499487aa4b2 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -12,46 +12,46 @@ homepage = "https://substrate.io" workspace = true [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fnv = "1.0.6" -futures = "0.3.30" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -wasm-timer = "0.2.5" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-network = { path = "../../network" } -sc-network-gossip = { path = "../../network-gossip" } -sc-network-sync = { path = "../../network/sync" } -sc-network-types = { path = "../../network/types" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -tokio = "1.37" +wasm-timer = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-gossip = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true 
} +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } [dev-dependencies] serde = { workspace = true, default-features = true } -tempfile = "3.1.0" -sc-block-builder = { path = "../../block-builder" } -sc-network-test = { path = "../../network/test" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +tempfile = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] # This feature adds BLS crypto primitives. It should not be used in production since diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 84f90622b5c14f8b7cba19749c877a6ce53ec226..7869f5a336b1135bb471fc584b8f826e17fb9d02 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -12,22 +12,22 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-consensus-beefy = { path = ".." 
} -sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } -sc-rpc = { path = "../../../rpc" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } -sp-application-crypto = { path = "../../../../primitives/application-crypto" } +sc-consensus-beefy = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } -tokio = { version = "1.22.0", features = ["macros"] } +sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs index 073fee0bdbdbecb3ee6d869a3e727d3ec62b9c9b..faa4d34eff5ac74b5024f720b975ff72bb5e9b11 100644 --- a/substrate/client/consensus/beefy/src/fisherman.rs +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -23,7 +23,7 @@ use sp_api::ProvideRuntimeApi; use sp_application_crypto::RuntimeAppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_beefy::{ - check_equivocation_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof, + check_double_voting_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId, }; use sp_runtime::{ @@ -132,7 +132,7 @@ where (active_rounds.validators(), active_rounds.validator_set_id()); let offender_id = proof.offender_id(); - if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { + if !check_double_voting_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "🥩 Skipping report for bad equivocation {:?}", proof); return Ok(()); } @@ -155,7 +155,7 @@ where for ProvedValidator { key_owner_proof, .. } in key_owner_proofs { self.runtime .runtime_api() - .submit_report_equivocation_unsigned_extrinsic( + .submit_report_double_voting_unsigned_extrinsic( best_block_hash, proof.clone(), key_owner_proof, diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index c01fb3db4845eb9e413e0dbe781ae395a3b31217..8480268529338fe09333719a2e26141e37c792ea 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -192,7 +192,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 681e11a0c5310f7bfa67aab824e861ef447f3941..d8f5b39dbbaaacc3094bcef31dfd8af618917990 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -314,7 +314,7 @@ sp_api::mock_impl_runtime_apis! 
{ self.inner.validator_set.clone() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( proof: DoubleVotingProof, AuthorityId, Signature>, _dummy: OpaqueKeyOwnershipProof, ) -> Option<()> { diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 3ce4da7ecd56adea78b92a905546c4fa619403d7..4a9f7a2d0e3b0afd6d8ed8b6f3237fcd1c209a49 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -1039,7 +1039,7 @@ pub(crate) mod tests { ecdsa_crypto, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, - test_utils::{generate_equivocation_proof, Keyring}, + test_utils::{generate_double_voting_proof, Keyring}, ConsensusLog, Payload, SignedCommitment, }; use sp_runtime::traits::{Header as HeaderT, One}; @@ -1586,7 +1586,7 @@ pub(crate) mod tests { let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); // generate an equivocation proof, with Bob as perpetrator - let good_proof = generate_equivocation_proof( + let good_proof = generate_double_voting_proof( (block_num, payload1.clone(), set_id, &Keyring::Bob), (block_num, payload2.clone(), set_id, &Keyring::Bob), ); @@ -1618,7 +1618,7 @@ pub(crate) mod tests { assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); // now let's try reporting a self-equivocation - let self_proof = generate_equivocation_proof( + let self_proof = generate_double_voting_proof( (block_num, payload1.clone(), set_id, &Keyring::Alice), (block_num, payload2.clone(), set_id, &Keyring::Alice), ); diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 6d642ec78fefa88692503152595f8548d715426c..a6f59e600f269c67ab87f295c268f3d185dafcf4 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -16,24 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } -futures-timer = "3.0.1" +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -parking_lot = "0.12.1" +mockall = { workspace = true } +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-network-types = { path = "../../network/types" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features 
= true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -sp-test-primitives = { path = "../../../primitives/test-primitives" } +sp-test-primitives = { workspace = true } diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index d91851aea62cf4564464b67bcd0bdcd5d712e139..c5adbb5a5fca0634b1cdb038c569a1de312cf859 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -307,10 +307,7 @@ pub trait BlockImport { type Error: std::error::Error + Send + 'static; /// Check block preconditions. - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result; + async fn check_block(&self, block: BlockCheckParams) -> Result; /// Import a block. async fn import_block( @@ -324,10 +321,7 @@ impl BlockImport for crate::import_queue::BoxBlockImport { type Error = sp_consensus::error::Error; /// Check block preconditions. - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { (**self).check_block(block).await } @@ -348,10 +342,7 @@ where { type Error = E; - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { (&**self).check_block(block).await } diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 371465536c35a5feea5d6e157eb3ee8b019b94db..35fc8ad4a402e72d9f6c8d110a468bea5f3c5195 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -28,6 +28,10 @@ //! queues to be instantiated simply. use log::{debug, trace}; +use std::{ + fmt, + time::{Duration, Instant}, +}; use sp_consensus::{error::Error as ConsensusError, BlockOrigin}; use sp_runtime::{ @@ -93,18 +97,18 @@ pub struct IncomingBlock { /// Verify a justification of a block #[async_trait::async_trait] -pub trait Verifier: Send { +pub trait Verifier: Send + Sync { /// Verify the given block data and return the `BlockImportParams` to /// continue the block import process. - async fn verify(&mut self, block: BlockImportParams) - -> Result, String>; + async fn verify(&self, block: BlockImportParams) -> Result, String>; } /// Blocks import queue API. /// /// The `import_*` methods can be called in order to send elements for the import queue to verify. pub trait ImportQueueService: Send { - /// Import bunch of blocks. + /// Import bunch of blocks, every next block must be an ancestor of the previous block in the + /// list. fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); /// Import block justifications. @@ -165,16 +169,16 @@ pub trait Link: Send { /// Block import successful result. #[derive(Debug, PartialEq)] -pub enum BlockImportStatus { +pub enum BlockImportStatus { /// Imported known block. - ImportedKnown(N, Option), + ImportedKnown(BlockNumber, Option), /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), + ImportedUnknown(BlockNumber, ImportedAux, Option), } -impl BlockImportStatus { +impl BlockImportStatus { /// Returns the imported block number. 
- pub fn number(&self) -> &N { + pub fn number(&self) -> &BlockNumber { match self { BlockImportStatus::ImportedKnown(n, _) | BlockImportStatus::ImportedUnknown(n, _, _) => n, @@ -223,44 +227,30 @@ pub async fn import_single_block>( block: IncomingBlock, verifier: &mut V, ) -> BlockImportResult { - import_single_block_metered(import_handle, block_origin, block, verifier, None).await + match verify_single_block_metered(import_handle, block_origin, block, verifier, None).await? { + SingleBlockVerificationOutcome::Imported(import_status) => Ok(import_status), + SingleBlockVerificationOutcome::Verified(import_parameters) => + import_single_block_metered(import_handle, import_parameters, None).await, + } } -/// Single block import function with metering. -pub(crate) async fn import_single_block_metered>( - import_handle: &mut impl BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, - metrics: Option, -) -> BlockImportResult { - let peer = block.origin; - - let (header, justifications) = match (block.header, block.justifications) { - (Some(header), justifications) => (header, justifications), - (None, _) => { - if let Some(ref peer) = peer { - debug!(target: LOG_TARGET, "Header {} was not provided by {} ", block.hash, peer); - } else { - debug!(target: LOG_TARGET, "Header {} was not provided ", block.hash); - } - return Err(BlockImportError::IncompleteHeader(peer)) - }, - }; - - trace!(target: LOG_TARGET, "Header {} has {:?} logs", block.hash, header.digest().logs().len()); - - let number = *header.number(); - let hash = block.hash; - let parent_hash = *header.parent_hash(); - - let import_handler = |import| match import { +fn import_handler( + number: NumberFor, + hash: Block::Hash, + parent_hash: Block::Hash, + block_origin: Option, + import: Result, +) -> Result>, BlockImportError> +where + Block: BlockT, +{ + match import { Ok(ImportResult::AlreadyInChain) => { trace!(target: LOG_TARGET, "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportStatus::ImportedKnown(number, peer)) + Ok(BlockImportStatus::ImportedKnown(number, block_origin)) }, Ok(ImportResult::Imported(aux)) => - Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)), + Ok(BlockImportStatus::ImportedUnknown(number, aux, block_origin)), Ok(ImportResult::MissingState) => { debug!( target: LOG_TARGET, @@ -277,15 +267,60 @@ pub(crate) async fn import_single_block_metered>( }, Ok(ImportResult::KnownBad) => { debug!(target: LOG_TARGET, "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer)) + Err(BlockImportError::BadBlock(block_origin)) }, Err(e) => { debug!(target: LOG_TARGET, "Error importing block {}: {:?}: {}", number, hash, e); Err(BlockImportError::Other(e)) }, + } +} + +pub(crate) enum SingleBlockVerificationOutcome { + /// Block is already imported. + Imported(BlockImportStatus>), + /// Block is verified, but needs to be imported. + Verified(SingleBlockImportParameters), +} + +pub(crate) struct SingleBlockImportParameters { + import_block: BlockImportParams, + hash: Block::Hash, + block_origin: Option, + verification_time: Duration, +} + +/// Single block import function with metering. 
+pub(crate) async fn verify_single_block_metered>( + import_handle: &impl BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, + metrics: Option<&Metrics>, +) -> Result, BlockImportError> { + let peer = block.origin; + let justifications = block.justifications; + + let Some(header) = block.header else { + if let Some(ref peer) = peer { + debug!(target: LOG_TARGET, "Header {} was not provided by {peer} ", block.hash); + } else { + debug!(target: LOG_TARGET, "Header {} was not provided ", block.hash); + } + return Err(BlockImportError::IncompleteHeader(peer)) }; - match import_handler( + trace!(target: LOG_TARGET, "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + + let number = *header.number(); + let hash = block.hash; + let parent_hash = *header.parent_hash(); + + match import_handler::( + number, + hash, + parent_hash, + peer, import_handle .check_block(BlockCheckParams { hash, @@ -298,10 +333,13 @@ pub(crate) async fn import_single_block_metered>( .await, )? { BlockImportStatus::ImportedUnknown { .. } => (), - r => return Ok(r), // Any other successful result means that the block is already imported. + r => { + // Any other successful result means that the block is already imported. + return Ok(SingleBlockVerificationOutcome::Imported(r)) + }, } - let started = std::time::Instant::now(); + let started = Instant::now(); let mut import_block = BlockImportParams::new(block_origin, header); import_block.body = block.body; @@ -332,19 +370,42 @@ pub(crate) async fn import_single_block_metered>( } else { trace!(target: LOG_TARGET, "Verifying {}({}) failed: {}", number, hash, msg); } - if let Some(metrics) = metrics.as_ref() { + if let Some(metrics) = metrics { metrics.report_verification(false, started.elapsed()); } BlockImportError::VerificationFailed(peer, msg) })?; - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(true, started.elapsed()); + let verification_time = started.elapsed(); + if let Some(metrics) = metrics { + metrics.report_verification(true, verification_time); } + Ok(SingleBlockVerificationOutcome::Verified(SingleBlockImportParameters { + import_block, + hash, + block_origin: peer, + verification_time, + })) +} + +pub(crate) async fn import_single_block_metered( + import_handle: &mut impl BlockImport, + import_parameters: SingleBlockImportParameters, + metrics: Option<&Metrics>, +) -> BlockImportResult { + let started = Instant::now(); + + let SingleBlockImportParameters { import_block, hash, block_origin, verification_time } = + import_parameters; + + let number = *import_block.header.number(); + let parent_hash = *import_block.header.parent_hash(); + let imported = import_handle.import_block(import_block).await; - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification_and_import(started.elapsed()); + if let Some(metrics) = metrics { + metrics.report_verification_and_import(started.elapsed() + verification_time); } - import_handler(imported) + + import_handler::(number, hash, parent_hash, block_origin, imported) } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index f4f618d1b31825e17987130b20a96c21e475b098..05f2b252796146f62ba7c258b208fa4c2eb4a3d1 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -19,7 +19,6 @@ use futures::{ prelude::*, task::{Context, Poll}, }; -use 
futures_timer::Delay; use log::{debug, trace}; use prometheus_endpoint::Registry; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -28,14 +27,14 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, }; -use std::{pin::Pin, time::Duration}; +use std::pin::Pin; use crate::{ import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, - import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, - BoxJustificationImport, ImportQueue, ImportQueueService, IncomingBlock, Link, - RuntimeOrigin, Verifier, LOG_TARGET, + import_single_block_metered, verify_single_block_metered, BlockImportError, + BlockImportStatus, BoxBlockImport, BoxJustificationImport, ImportQueue, ImportQueueService, + IncomingBlock, Link, RuntimeOrigin, SingleBlockVerificationOutcome, Verifier, LOG_TARGET, }, metrics::Metrics, }; @@ -61,13 +60,16 @@ impl BasicQueue { /// Instantiate a new basic queue, with given verifier. /// /// This creates a background task, and calls `on_start` on the justification importer. - pub fn new>( + pub fn new( verifier: V, block_import: BoxBlockImport, justification_import: Option>, spawner: &impl sp_core::traits::SpawnEssentialNamed, prometheus_registry: Option<&Registry>, - ) -> Self { + ) -> Self + where + V: Verifier + 'static, + { let (result_sender, result_port) = buffered_link::buffered_link(100_000); let metrics = prometheus_registry.and_then(|r| { @@ -224,7 +226,6 @@ async fn block_import_process( mut result_sender: BufferedLinkSender, mut block_import_receiver: TracingUnboundedReceiver>, metrics: Option, - delay_between_blocks: Duration, ) { loop { let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await @@ -239,15 +240,9 @@ async fn block_import_process( }, }; - let res = import_many_blocks( - &mut block_import, - origin, - blocks, - &mut verifier, - delay_between_blocks, - metrics.clone(), - ) - .await; + let res = + import_many_blocks(&mut block_import, origin, blocks, &mut verifier, metrics.clone()) + .await; result_sender.blocks_processed(res.imported, res.block_count, res.results); } @@ -260,7 +255,7 @@ struct BlockImportWorker { } impl BlockImportWorker { - fn new>( + fn new( result_sender: BufferedLinkSender, verifier: V, block_import: BoxBlockImport, @@ -270,19 +265,20 @@ impl BlockImportWorker { impl Future + Send, TracingUnboundedSender>, TracingUnboundedSender>, - ) { + ) + where + V: Verifier + 'static, + { use worker_messages::*; let (justification_sender, mut justification_port) = tracing_unbounded("mpsc_import_queue_worker_justification", 100_000); - let (block_import_sender, block_import_port) = + let (block_import_sender, block_import_receiver) = tracing_unbounded("mpsc_import_queue_worker_blocks", 100_000); let mut worker = BlockImportWorker { result_sender, justification_import, metrics }; - let delay_between_blocks = Duration::default(); - let future = async move { // Let's initialize `justification_import` if let Some(justification_import) = worker.justification_import.as_mut() { @@ -295,9 +291,8 @@ impl BlockImportWorker { block_import, verifier, worker.result_sender.clone(), - block_import_port, + block_import_receiver, worker.metrics.clone(), - delay_between_blocks, ); futures::pin_mut!(block_import_process); @@ -394,7 +389,6 @@ async fn import_many_blocks>( blocks_origin: BlockOrigin, blocks: Vec>, verifier: &mut V, - delay_between_blocks: Duration, metrics: Option, ) -> 
ImportManyBlocksResult { let count = blocks.len(); @@ -431,15 +425,22 @@ async fn import_many_blocks>( let import_result = if has_error { Err(BlockImportError::Cancelled) } else { - // The actual import. - import_single_block_metered( + let verification_fut = verify_single_block_metered( import_handle, blocks_origin, block, verifier, - metrics.clone(), - ) - .await + metrics.as_ref(), + ); + match verification_fut.await { + Ok(SingleBlockVerificationOutcome::Imported(import_status)) => Ok(import_status), + Ok(SingleBlockVerificationOutcome::Verified(import_parameters)) => { + // The actual import. + import_single_block_metered(import_handle, import_parameters, metrics.as_ref()) + .await + }, + Err(e) => Err(e), + } }; if let Some(metrics) = metrics.as_ref() { @@ -460,11 +461,7 @@ async fn import_many_blocks>( results.push((import_result, block_hash)); - if delay_between_blocks != Duration::default() && !has_error { - Delay::new(delay_between_blocks).await; - } else { - Yield::new().await - } + Yield::new().await } } @@ -510,7 +507,7 @@ mod tests { #[async_trait::async_trait] impl Verifier for () { async fn verify( - &mut self, + &self, block: BlockImportParams, ) -> Result, String> { Ok(BlockImportParams::new(block.origin, block.header)) @@ -522,7 +519,7 @@ mod tests { type Error = sp_consensus::Error; async fn check_block( - &mut self, + &self, _block: BlockCheckParams, ) -> Result { Ok(ImportResult::imported(false)) diff --git a/substrate/client/consensus/epochs/Cargo.toml b/substrate/client/consensus/epochs/Cargo.toml index e409e171e477c2452903a09ee78916808a210011..127cc9ebec207b27f9bf2f54c9e07184b290ebae 100644 --- a/substrate/client/consensus/epochs/Cargo.toml +++ b/substrate/client/consensus/epochs/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fork-tree = { path = "../../../utils/fork-tree" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-runtime = { path = "../../../primitives/runtime" } +codec = { features = ["derive"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index b03a263ae0a37d3a304b54096ad71a06b89c99bd..e49c7c9f0d7a93e727453592d7f300d54bc96acf 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -17,51 +17,51 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.8.2" -array-bytes = "6.2.2" -async-trait = "0.1.79" -dyn-clone = "1.0" -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.30" -futures-timer = "3.0.1" +ahash = { workspace = true } +array-bytes = { workspace = true, default-features = true } +async-trait = { workspace = true } +dyn-clone = { workspace = true } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -codec = { package = 
"parity-scale-codec", version = "3.6.12", features = ["derive"] } -parking_lot = "0.12.1" -rand = "0.8.5" +codec = { features = ["derive"], workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-block-builder = { path = "../../block-builder" } -sc-chain-spec = { path = "../../chain-spec" } -sc-client-api = { path = "../../api" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sc-consensus = { path = "../common" } -sc-network = { path = "../../network" } -sc-network-gossip = { path = "../../network-gossip" } -sc-network-common = { path = "../../network/common" } -sc-network-sync = { path = "../../network/sync" } -sc-network-types = { path = "../../network/types" } -sc-telemetry = { path = "../../telemetry" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-gossip = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.3.0" -finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } +assert_matches = { workspace = true } +finality-grandpa = { features = ["derive-codec", "test-helpers"], workspace = true, default-features = 
true } serde = { workspace = true, default-features = true } -tokio = "1.37" -sc-network = { path = "../../network" } -sc-network-test = { path = "../../network/test" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index a9437a9be07544060e2b05aff4a86e0edcc3dcc5..0215fe2e3e64244ac5143dff0b18237fedc23bba 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -13,25 +13,25 @@ homepage = "https://substrate.io" workspace = true [dependencies] -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.30" -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-client-api = { path = "../../../api" } -sc-consensus-grandpa = { path = ".." 
} -sc-rpc = { path = "../../../rpc" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-block-builder = { path = "../../../block-builder" } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } -sp-core = { path = "../../../../primitives/core" } -sp-consensus-grandpa = { path = "../../../../primitives/consensus/grandpa" } -sp-keyring = { path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } -tokio = { version = "1.22.0", features = ["macros"] } +sc-block-builder = { workspace = true, default-features = true } +sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index b594c0f678cea64efae7980ee17a9876ce6d5013..8b7b02f180ecd582063c5b02e1f4217c0a507e13 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -518,7 +518,7 @@ where Client: ClientForGrandpa, Client::Api: GrandpaApi, for<'a> &'a Client: BlockImport, - SC: Send, + SC: Send + Sync, { type Error = ConsensusError; @@ -697,7 +697,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 33f5bf1f8c1501e0e366edab6c3606716aeb47ab..3d74eda8fa01f838b063c134163a61a8d98af081 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,37 +16,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -assert_matches = "1.3.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-aura = { path = "../aura" } -sc-consensus-babe = { path = "../babe" } 
-sc-consensus-epochs = { path = "../epochs" } -sc-transaction-pool = { path = "../../transaction-pool" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } [dev-dependencies] -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } -sc-basic-authorship = { path = "../../basic-authorship" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index bc56ce0227142fee2c001c39ce8d31cd9e6fb9b5..a68e46f0134d655d1b034b2b4a40727627724164 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -96,7 +96,7 @@ where C: HeaderBackend + HeaderMetadata, { async fn verify( - &mut self, + &self, mut import_params: BlockImportParams, ) -> Result, String> { import_params.finalized = false; diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 8fc7e7ecab2f45cf8359c2f449dde3b480bb3ad3..39f8f8609d8d7f35867cc8108ad6667263fe5b74 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ 
b/substrate/client/consensus/manual-seal/src/lib.rs @@ -65,7 +65,7 @@ struct ManualSealVerifier; #[async_trait::async_trait] impl Verifier for ManualSealVerifier { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { block.finalized = false; diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index 51a2be1b6cf5d4be2d5a5c3af6b0e6ea2bc25406..f2a071ec25c2c3fb97e64229e2fac662532d5969 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.1" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-pow = { path = "../../../primitives/consensus/pow" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-runtime = { path = "../../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-pow = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index ee5c1dfc6f11a26599c0f01efee9224caded43cd..50e9533abb36ab24e5d4942d154a378f84c4beec 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -312,10 +312,7 @@ where { type Error = ConsensusError; - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { self.inner.check_block(block).await.map_err(Into::into) } @@ -442,7 +439,7 @@ where Algorithm::Difficulty: 'static + Send, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { let hash = block.header.hash(); diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 8e88ee68d7d739a888f3b0e32b7a8fee3ac1e41c..2b795b13f8e338d6ed982ef2bd261e76336e683d 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ 
b/substrate/client/consensus/slots/Cargo.toml @@ -17,22 +17,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-telemetry = { path = "../../telemetry" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index d9d792005312503f48bb2d628235794952104ccd..7cdf90877dffad20f91a1d1f846648cc22dfe4ea 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -29,8 +29,8 @@ mod aux_schema; mod slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -pub use slots::SlotInfo; use slots::Slots; +pub use slots::{time_until_next_slot, SlotInfo}; use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index b10c42d50f0bcbf9aed764a2df85cffe7c6baf66..c8372701ac3290377ae54f0aed7c7eb8ed781511 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -16,38 +16,38 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -] } -hash-db = "0.16.0" -kvdb = "0.13.0" -kvdb-memorydb = "0.13.0" -kvdb-rocksdb = { version = "0.19.0", optional = true } -linked-hash-map = "0.5.4" +], workspace = true, default-features = true } +hash-db = { workspace = true, default-features = true } +kvdb = { workspace = true } +kvdb-memorydb = { workspace = true } +kvdb-rocksdb = { optional = true, workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -parity-db = "0.4.12" -parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-state-db = { path = 
"../state-db" } -schnellru = "0.2.1" -sp-arithmetic = { path = "../../primitives/arithmetic" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-database = { path = "../../primitives/database" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-trie = { path = "../../primitives/trie" } +parity-db = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-state-db = { workspace = true, default-features = true } +schnellru = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] -criterion = "0.5.1" -kvdb-rocksdb = "0.19.0" -rand = "0.8.5" -tempfile = "3.1.0" -quickcheck = { version = "1.0.3", default-features = false } -kitchensink-runtime = { path = "../../bin/node/runtime" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -array-bytes = "6.2.2" +criterion = { workspace = true, default-features = true } +kvdb-rocksdb = { workspace = true } +rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +quickcheck = { workspace = true } +kitchensink-runtime = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +array-bytes = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 36f9aea817c9c7031ae69a588a9385e88e786b0c..e95cd9e4ad5fd3e527f3f870aaf40948b238c9a2 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1357,6 +1357,8 @@ impl Backend { Ok(()) } + /// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls + /// for performance reasons. fn finalize_block_with_transaction( &self, transaction: &mut Transaction, @@ -1365,6 +1367,7 @@ impl Backend { last_finalized: Option, justification: Option, current_transaction_justifications: &mut HashMap, + remove_displaced: bool, ) -> ClientResult> { // TODO: ensure best chain contains this block. 
let number = *header.number(); @@ -1377,6 +1380,7 @@ impl Backend { hash, with_state, current_transaction_justifications, + remove_displaced, )?; if let Some(justification) = justification { @@ -1454,7 +1458,8 @@ impl Backend { let mut current_transaction_justifications: HashMap = HashMap::new(); - for (block_hash, justification) in operation.finalized_blocks { + let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable(); + while let Some((block_hash, justification)) = finalized_blocks.next() { let block_header = self.blockchain.expect_header(block_hash)?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, @@ -1463,6 +1468,7 @@ impl Backend { Some(last_finalized_hash), justification, &mut current_transaction_justifications, + finalized_blocks.peek().is_none(), )?); last_finalized_hash = block_hash; last_finalized_num = *block_header.number(); @@ -1642,6 +1648,7 @@ impl Backend { hash, operation.commit_state, &mut current_transaction_justifications, + true, )?; } else { // canonicalize blocks which are old enough, regardless of finality. @@ -1766,9 +1773,10 @@ impl Backend { Ok(()) } - // write stuff to a transaction after a new block is finalized. - // this canonicalizes finalized blocks. Fails if called with a block which - // was not a child of the last finalized block. + // Write stuff to a transaction after a new block is finalized. This canonicalizes finalized + // blocks. Fails if called with a block which was not a child of the last finalized block. + /// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls + /// for performance reasons. fn note_finalized( &self, transaction: &mut Transaction, @@ -1776,6 +1784,7 @@ impl Backend { f_hash: Block::Hash, with_state: bool, current_transaction_justifications: &mut HashMap, + remove_displaced: bool, ) -> ClientResult<()> { let f_num = *f_header.number(); @@ -1800,13 +1809,19 @@ impl Backend { apply_state_commit(transaction, commit); } - let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; - let finalization_outcome = - FinalizationOutcome::new(new_displaced.displaced_leaves.clone().into_iter()); + if remove_displaced { + let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; - self.blockchain.leaves.write().remove_displaced_leaves(&finalization_outcome); + self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new( + new_displaced.displaced_leaves.iter().copied(), + )); - self.prune_blocks(transaction, f_num, &new_displaced, current_transaction_justifications)?; + if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) { + self.prune_displaced_branches(transaction, &new_displaced)?; + } + } + + self.prune_blocks(transaction, f_num, current_transaction_justifications)?; Ok(()) } @@ -1815,39 +1830,29 @@ impl Backend { &self, transaction: &mut Transaction, finalized_number: NumberFor, - displaced: &DisplacedLeavesAfterFinalization, current_transaction_justifications: &mut HashMap, ) -> ClientResult<()> { - match self.blocks_pruning { - BlocksPruning::KeepAll => {}, - BlocksPruning::Some(blocks_pruning) => { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized_number >= keep.into() { - let number = finalized_number.saturating_sub(keep.into()); - - // Before we prune a block, check if it is pinned - if let Some(hash) = self.blockchain.hash(number)? 
{ - self.blockchain.insert_persisted_body_if_pinned(hash)?; - - // If the block was finalized in this transaction, it will not be in the db - // yet. - if let Some(justification) = - current_transaction_justifications.remove(&hash) - { - self.blockchain.insert_justifications_if_pinned(hash, justification); - } else { - self.blockchain.insert_persisted_justifications_if_pinned(hash)?; - } - }; + if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { + // Always keep the last finalized block + let keep = std::cmp::max(blocks_pruning, 1); + if finalized_number >= keep.into() { + let number = finalized_number.saturating_sub(keep.into()); + + // Before we prune a block, check if it is pinned + if let Some(hash) = self.blockchain.hash(number)? { + self.blockchain.insert_persisted_body_if_pinned(hash)?; + + // If the block was finalized in this transaction, it will not be in the db + // yet. + if let Some(justification) = current_transaction_justifications.remove(&hash) { + self.blockchain.insert_justifications_if_pinned(hash, justification); + } else { + self.blockchain.insert_persisted_justifications_if_pinned(hash)?; + } + }; - self.prune_block(transaction, BlockId::::number(number))?; - } - self.prune_displaced_branches(transaction, displaced)?; - }, - BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, displaced)?; - }, + self.prune_block(transaction, BlockId::::number(number))?; + } } Ok(()) } @@ -1858,11 +1863,9 @@ impl Backend { displaced: &DisplacedLeavesAfterFinalization, ) -> ClientResult<()> { // Discard all blocks from displaced branches - for (_, tree_route) in displaced.tree_routes.iter() { - for r in tree_route.retracted() { - self.blockchain.insert_persisted_body_if_pinned(r.hash)?; - self.prune_block(transaction, BlockId::::hash(r.hash))?; - } + for &hash in displaced.displaced_blocks.iter() { + self.blockchain.insert_persisted_body_if_pinned(hash)?; + self.prune_block(transaction, BlockId::::hash(hash))?; } Ok(()) } @@ -2110,6 +2113,7 @@ impl sc_client_api::backend::Backend for Backend { None, justification, &mut current_transaction_justifications, + true, )?; self.storage.db.commit(transaction)?; @@ -3108,6 +3112,122 @@ pub(crate) mod tests { } } + #[test] + fn displaced_leaves_after_finalizing_works() { + let backend = Backend::::new_test(1000, 100); + let blockchain = backend.blockchain(); + let genesis_number = 0; + let genesis_hash = + insert_header(&backend, genesis_number, Default::default(), None, Default::default()); + + // fork from genesis: 3 prong. 
+ // block 0 -> a1 -> a2 -> a3 + // \ + // -> b1 -> b2 -> c1 -> c2 + // \ + // -> d1 -> d2 + let a1_number = 1; + let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default()); + let a2_number = 2; + let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default()); + let a3_number = 3; + let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default()); + + { + let displaced = blockchain + .displaced_leaves_after_finalizing(genesis_hash, genesis_number) + .unwrap(); + assert_eq!(displaced.displaced_leaves, vec![]); + assert_eq!(displaced.displaced_blocks, vec![]); + } + { + let displaced_a1 = + blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, vec![]); + assert_eq!(displaced_a1.displaced_blocks, vec![]); + + let displaced_a2 = + blockchain.displaced_leaves_after_finalizing(a2_hash, a3_number).unwrap(); + assert_eq!(displaced_a2.displaced_leaves, vec![]); + assert_eq!(displaced_a2.displaced_blocks, vec![]); + + let displaced_a3 = + blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + assert_eq!(displaced_a3.displaced_leaves, vec![]); + assert_eq!(displaced_a3.displaced_blocks, vec![]); + } + + // fork from genesis: 2 prong. + let b1_number = 1; + let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32])); + let b2_number = 2; + let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default()); + + // fork from b2. + let c1_number = 3; + let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32])); + let c2_number = 4; + let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default()); + + // fork from b1. 
+ let d1_number = 2; + let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32])); + let d2_number = 3; + let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default()); + + { + let displaced_a1 = + blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + assert_eq!( + displaced_a1.displaced_leaves, + vec![(c2_number, c2_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced_a1.displaced_blocks, displaced_blocks); + + let displaced_a2 = + blockchain.displaced_leaves_after_finalizing(a2_hash, a2_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves); + assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks); + + let displaced_a3 = + blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves); + assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks); + } + { + let displaced = + blockchain.displaced_leaves_after_finalizing(b1_hash, b1_number).unwrap(); + assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); + } + { + let displaced = + blockchain.displaced_leaves_after_finalizing(b2_hash, b2_number).unwrap(); + assert_eq!( + displaced.displaced_leaves, + vec![(a3_number, a3_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); + } + { + let displaced = + blockchain.displaced_leaves_after_finalizing(c2_hash, c2_number).unwrap(); + assert_eq!( + displaced.displaced_leaves, + vec![(a3_number, a3_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); + } + } + #[test] fn test_tree_route_regression() { // NOTE: this is a test for a regression introduced in #3665, the result diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 1f54b82030ff226b179afe8f167e134417b859e9..c10c60822ff8dbec0b3ac800a1c3c2b0a3d8b0d8 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -17,43 +17,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.12.1" -schnellru = "0.2.1" -tracing = "0.1.29" +parking_lot = { workspace = true, default-features = true } +schnellru = { workspace = true } +tracing = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sc-executor-common = { path = "common" } -sc-executor-polkavm = { path = "polkavm" } -sc-executor-wasmtime = { path = "wasmtime" } -sp-api = { path = "../../primitives/api" } -sp-core = { path = "../../primitives/core" } -sp-externalities = { path = "../../primitives/externalities" } -sp-io = { path = "../../primitives/io" } -sp-panic-handler = { path = "../../primitives/panic-handler" } -sp-runtime-interface = { path = "../../primitives/runtime-interface" } -sp-trie = { path = "../../primitives/trie" } -sp-version = { path = "../../primitives/version" } -sp-wasm-interface = { path = 
"../../primitives/wasm-interface" } +codec = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-polkavm = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-panic-handler = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1.3.0" -wat = "1.0" -sc-runtime-test = { path = "runtime-test" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-runtime = { path = "../../primitives/runtime" } -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -sc-tracing = { path = "../tracing" } -sp-tracing = { path = "../../primitives/tracing" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +wat = { workspace = true } +sc-runtime-test = { workspace = true } +substrate-test-runtime = { workspace = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } tracing-subscriber = { workspace = true } -paste = "1.0" -regex = "1.6.0" -criterion = "0.5.1" -env_logger = "0.11" -num_cpus = "1.13.1" -tempfile = "3.3.0" +paste = { workspace = true, default-features = true } +regex = { workspace = true } +criterion = { workspace = true, default-features = true } +env_logger = { workspace = true } +num_cpus = { workspace = true } +tempfile = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index 8ff34c3709a5e486fb9036a638788a532c6f296c..e985c75ca908a669f0ff7e6cc7368269e0814343 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = { workspace = true } -wasm-instrument = "0.4" -sc-allocator = { path = "../../allocator" } -sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +wasm-instrument = { workspace = true, default-features = true } +sc-allocator = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } polkavm = { workspace = true } [features] diff --git a/substrate/client/executor/polkavm/Cargo.toml b/substrate/client/executor/polkavm/Cargo.toml index 9d0eb8ccf0ee072c86195068a589309edd132ba4..8b849209a07cf17838c89e32cc53fcb80fe3c6ab 
100644 --- a/substrate/client/executor/polkavm/Cargo.toml +++ b/substrate/client/executor/polkavm/Cargo.toml @@ -19,5 +19,5 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } polkavm = { workspace = true } -sc-executor-common = { path = "../common" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +sc-executor-common = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } diff --git a/substrate/client/executor/runtime-test/Cargo.toml b/substrate/client/executor/runtime-test/Cargo.toml index 82610c4f50c2841fea13c1f859cc242f8ae427c7..5f5e7eb46d623782916e55637dfd93e892cab6fe 100644 --- a/substrate/client/executor/runtime-test/Cargo.toml +++ b/substrate/client/executor/runtime-test/Cargo.toml @@ -16,14 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"] } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-core = { workspace = true } +sp-io = { features = ["improved_panic_error_reporting"], workspace = true } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-std = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 7f91b3ffe7644e37ebee0bc035faafbf38e21148..5d94ec6dcd38628be7858b8477e7e4892a45b210 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -178,7 +178,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"input".to_vec() => value, b"foo".to_vec() => b"bar".to_vec(), @@ -186,7 +186,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(ext, expected); + assert!(ext.eq(&mut expected)); } test_wasm_execution!(clear_prefix_should_work); @@ -208,7 +208,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), @@ -216,7 +216,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }); - assert_eq!(expected, ext); + assert!(expected.eq(&mut ext)); } test_wasm_execution!(blake2_256_should_work); diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index d3d670650db789b2b9b854a5fda8724a506833ee..e58b19bb12431a3ffb5b9fc77526b523e7cf4863 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ 
b/substrate/client/executor/wasmtime/Cargo.toml @@ -17,24 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -cfg-if = "1.0" -libc = "0.2.152" -parking_lot = "0.12.1" +cfg-if = { workspace = true } +libc = { workspace = true } +parking_lot = { workspace = true, default-features = true } # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! -wasmtime = { version = "8.0.1", default-features = false, features = [ +wasmtime = { features = [ "cache", "cranelift", "jitdump", "parallel-compilation", "pooling-allocator", -] } -anyhow = "1.0.81" -sc-allocator = { path = "../../allocator" } -sc-executor-common = { path = "../common" } -sp-runtime-interface = { path = "../../../primitives/runtime-interface" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = ["wasmtime"] } +], workspace = true } +anyhow = { workspace = true } +sc-allocator = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } # Here we include the rustix crate in the exactly same semver-compatible version as used by # wasmtime and enable its 'use-libc' flag. @@ -42,13 +42,13 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = [" # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory # problems (since then `mmap` etc. cannot be easily hooked into). -rustix = { version = "0.36.7", default-features = false, features = ["fs", "mm", "param", "std", "use-libc"] } +rustix = { features = ["fs", "mm", "param", "std", "use-libc"], workspace = true } [dev-dependencies] -wat = "1.0" -sc-runtime-test = { path = "../runtime-test" } -sp-io = { path = "../../../primitives/io" } -tempfile = "3.3.0" -paste = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.12" } -cargo_metadata = "0.15.4" +wat = { workspace = true } +sc-runtime-test = { workspace = true } +sp-io = { workspace = true, default-features = true } +tempfile = { workspace = true } +paste = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +cargo_metadata = { workspace = true } diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index 191ef5f19f8df65b3c817129fe34c4db7438826d..9da2296deee3c2cdec7d2ba2ef7d2958dc03d86c 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ansi_term = "0.12.1" -futures = "0.3.30" -futures-timer = "3.0.1" +ansi_term = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -sc-client-api = { path = "../api" } -sc-network-common = { path = "../network/common" } -sc-network-sync = { path = "../network/sync" } -sc-network = { path = "../network" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-runtime = { path = "../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, 
default-features = true } +sc-network = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index 443ce3507542c155119f2d424d18e9268b4d391b..cf128016370b41fde1c5c1fb151008a8a589c581 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -parking_lot = "0.12.1" +array-bytes = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-application-crypto = { path = "../../primitives/application-crypto" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } [features] # This feature adds BLS crypto primitives. diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 3cf3cdd15dad9bb30fe2a9a70b6adc9f4541e56e..e219d36d3f79cf8770c58b33c138b8d1ffd01df0 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -14,22 +14,22 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sc-client-api = { path = "../api" } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range" } -sc-offchain = { path = "../offchain" } -sp-runtime = { path = "../../primitives/runtime" } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -sc-block-builder = { path = "../block-builder" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -tokio = "1.37" +parking_lot = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace 
= true, default-features = true } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index 25e6e316a8be0ab6ec0ff70f38a05f603ebf8404..5f856b4069a0f37a37189418a2a0e36976e1f98f 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -15,14 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } -sp-runtime = { path = "../../../primitives/runtime" } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 1626305639498855148b515e666e1e91a603643f..2b06c3eca2344440b8391e9a351e0739d12352be 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -arrayvec = "0.7.2" -blake2 = "0.10.4" -bytes = "1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +array-bytes = { workspace = true, default-features = true } +arrayvec = { workspace = true } +blake2 = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -mixnet = "0.7.0" -multiaddr = "0.17.1" -parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-types = { path = "../network/types" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-api = { path = "../../primitives/api" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-mixnet = { path = "../../primitives/mixnet" } -sp-runtime = { path = "../../primitives/runtime" } +mixnet = { workspace = true } +multiaddr = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { 
workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-mixnet = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 3eeea6651186162ed2f28bdf12ec08ad184df906..f1441e4a1cf274996df7baf40183142f8bf2a2a0 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -17,23 +17,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.8.2" -futures = "0.3.30" -futures-timer = "3.0.1" -libp2p = "0.51.4" +ahash = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -schnellru = "0.2.1" -tracing = "0.1.29" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-sync = { path = "../network/sync" } -sc-network-types = { path = "../network/types" } -sp-runtime = { path = "../../primitives/runtime" } +schnellru = { workspace = true } +tracing = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -tokio = "1.37" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -quickcheck = { version = "1.0.3", default-features = false } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +quickcheck = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index cd344d9196d873b820c11246f1518e81a87f8683..414da9b2a5890bb61fb77e3d942bb04e3e44add7 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -348,7 +348,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; + use crate::{ValidationResult, ValidatorContext}; use codec::{DecodeAll, Encode}; use futures::{ channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, @@ -363,6 +363,7 @@ mod tests { }; use sc_network_common::role::ObservedRole; use sc_network_sync::SyncEventStream; + use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ testing::H256, traits::{Block as BlockT, NumberFor}, diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index e04ea2a91e7cbc59ae652dace7ede5398bedf01c..20d9922200c2c3c6cf394692099a21c01b540bd0 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -67,9 +67,12 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, 
Validator, ValidatorContext}, }; -use sc_network::{multiaddr, types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkPeers}; +use sc_network::{types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkPeers}; use sc_network_sync::SyncEventStream; -use sc_network_types::PeerId; +use sc_network_types::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::iter; @@ -80,8 +83,7 @@ mod validator; /// Abstraction over a network. pub trait Network: NetworkPeers + NetworkEventStream { fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { - let addr = - iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let addr = Multiaddr::empty().with(Protocol::P2p(*who.as_ref())); let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 016afa95eceaed228012641af0982e103c51eb64..ac3f7a1b8c74c24fbccf44906f97249739bee247 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -542,12 +542,12 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; - use crate::multiaddr::Multiaddr; use futures::prelude::*; use sc_network::{ config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink, NetworkBlock, NetworkEventStream, NetworkPeers, ReputationChange, }; + use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::NumberFor, diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 29b14a4511cac655096b3e3dded5013462ae7e9e..a0cf42eaab22f54685a9087ac0fc3f9c68e1d31a 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -17,71 +17,71 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -asynchronous-codec = "0.6" -bytes = "1" -cid = "0.9.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -either = "1.5.3" -fnv = "1.0.6" -futures = "0.3.30" -futures-timer = "3.0.2" -ip_network = "0.4.1" -libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } -linked_hash_set = "0.1.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +asynchronous-codec = { workspace = true } +bytes = { workspace = true, default-features = true } +cid = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +either = { workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +ip_network = { workspace = true } +libp2p = { features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"], workspace = true } +linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -parking_lot = "0.12.1" -partial_sort = "0.2.0" -pin-project = "1.0.12" -rand 
= "0.8.5" +mockall = { workspace = true } +parking_lot = { workspace = true, default-features = true } +partial_sort = { workspace = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -smallvec = "1.11.0" +smallvec = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = { version = "1.22.0", features = ["macros", "sync"] } -tokio-stream = "0.1.7" -unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec", "futures"] } -zeroize = "1.4.3" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -prost = "0.12.4" -sc-client-api = { path = "../api" } -sc-network-common = { path = "common" } -sc-network-types = { path = "types" } -sc-utils = { path = "../utils" } -sp-arithmetic = { path = "../../primitives/arithmetic" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -wasm-timer = "0.2" -litep2p = "0.5.0" -once_cell = "1.18.0" -void = "1.0.2" -schnellru = "0.2.1" +tokio = { features = ["macros", "sync"], workspace = true, default-features = true } +tokio-stream = { workspace = true } +unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } +zeroize = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +prost = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +wasm-timer = { workspace = true } +litep2p = { workspace = true } +once_cell = { workspace = true } +void = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -assert_matches = "1.3" -mockall = "0.11.3" -multistream-select = "0.12.1" -rand = "0.8.5" -tempfile = "3.1.0" -tokio = { version = "1.22.0", features = ["macros"] } -tokio-util = { version = "0.7.4", features = ["compat"] } -tokio-test = "0.4.2" -sc-block-builder = { path = "../block-builder" } -sc-network-light = { path = "light" } -sc-network-sync = { path = "sync" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-test-primitives = { path = "../../primitives/test-primitives" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +assert_matches = { workspace = true } +mockall = { workspace = true } +multistream-select = { workspace = true } +rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } +tokio-test = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network-light = { workspace = true, 
default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-test-primitives = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } [features] default = [] diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 9a1bf5b88ea1a97cf6bdfe358a44338561500b50..79326492159b153ad8e8c91932aeac0fa4ecb6bc 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -async-trait = "0.1.79" -bitflags = "1.3.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +async-trait = { workspace = true } +bitflags = { workspace = true } +codec = { features = [ "derive", -] } -futures = "0.3.30" -libp2p-identity = { version = "0.1.3", features = ["peerid"] } -sc-consensus = { path = "../../consensus/common" } -sc-network-types = { path = "../types" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-runtime = { path = "../../../primitives/runtime" } +], workspace = true, default-features = true } +futures = { workspace = true } +libp2p-identity = { features = ["peerid"], workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index baaed578b884172bdcfba5eb2a66e48ac32c56a5..52deaa93852af84213a4515c7bf3a3e9cb6892f2 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -async-channel = "1.8.0" -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +async-channel = { workspace = true } +array-bytes = { workspace = true, default-features = true } +codec = { features = [ "derive", -] } -futures = "0.3.30" +], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prost = "0.12.4" -sp-blockchain = { path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" } -sc-network-types = { path = "../types" } -sc-network = { path = ".." 
} -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } +prost = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 68deac0f47bc1e3b367fc56ba77e67ba63380648..68816a10980d46565cc05e58d81e86b10bbc92e5 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -31,8 +31,8 @@ use crate::{ use futures::channel::oneshot; use libp2p::{ - core::Multiaddr, identify::Info as IdentifyInfo, identity::PublicKey, kad::RecordKey, - swarm::NetworkBehaviour, PeerId, + connection_limits::ConnectionLimits, core::Multiaddr, identify::Info as IdentifyInfo, + identity::PublicKey, kad::RecordKey, swarm::NetworkBehaviour, PeerId, StreamProtocol, }; use parking_lot::Mutex; @@ -47,8 +47,10 @@ pub use crate::request_responses::{InboundFailure, OutboundFailure, ResponseFail /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut")] +#[behaviour(to_swarm = "BehaviourOut")] pub struct Behaviour { + /// Connection limits. + connection_limits: libp2p::connection_limits::Behaviour, /// All the substrate-specific protocols. substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a @@ -180,6 +182,7 @@ impl Behaviour { request_response_protocols: Vec, peer_store_handle: Arc, external_addresses: Arc>>, + connection_limits: ConnectionLimits, ) -> Result { Ok(Self { substrate, @@ -193,6 +196,7 @@ impl Behaviour { request_response_protocols.into_iter(), peer_store_handle, )?, + connection_limits: libp2p::connection_limits::Behaviour::new(connection_limits), }) } @@ -267,7 +271,7 @@ impl Behaviour { pub fn add_self_reported_address_to_dht( &mut self, peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], + supported_protocols: &[StreamProtocol], addr: Multiaddr, ) { self.discovery.add_self_reported_address(peer_id, supported_protocols, addr); @@ -376,3 +380,9 @@ impl From for BehaviourOut { } } } + +impl From for BehaviourOut { + fn from(e: void::Void) -> Self { + void::unreachable(e) + } +} diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index 100a1e9dfb38ea23d4bb7d846aed0295f1cdaaea..e939558b20b8b021d8b38140d84365621b93413f 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -114,13 +114,13 @@ pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { /// Splits a Multiaddress into a Multiaddress and PeerId. 
pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { - let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + let multihash = match addr.pop() { + Some(multiaddr::Protocol::P2p(multihash)) => multihash, _ => return Err(ParseErr::PeerIdMissing), }; + let peer_id = PeerId::from_multihash(multihash).map_err(|_| ParseErr::InvalidPeerId)?; - Ok((who, addr)) + Ok((peer_id, addr)) } /// Address of a node, including its identity. diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 2c788ec713f345ef18d421a07c279e9dfd9f7ba0..3145b891a8d3c1d0287b7d070d9ed40402847c49 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -55,20 +55,20 @@ use ip_network::IpNetwork; use libp2p::{ core::{Endpoint, Multiaddr}, kad::{ - handler::KademliaHandler, + self, record::store::{MemoryStore, RecordStore}, - GetClosestPeersError, GetRecordOk, Kademlia, KademliaBucketInserts, KademliaConfig, - KademliaEvent, QueryId, QueryResult, Quorum, Record, RecordKey, + Behaviour as Kademlia, BucketInserts, Config as KademliaConfig, Event as KademliaEvent, + GetClosestPeersError, GetRecordOk, QueryId, QueryResult, Quorum, Record, RecordKey, }, mdns::{self, tokio::Behaviour as TokioMdns}, multiaddr::Protocol, swarm::{ behaviour::{ toggle::{Toggle, ToggleConnectionHandler}, - DialFailure, FromSwarm, NewExternalAddr, + DialFailure, ExternalAddrConfirmed, FromSwarm, }, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, + StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -105,8 +105,8 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocol: Vec, - kademlia_legacy_protocol: Vec, + kademlia_protocol: Option, + kademlia_legacy_protocol: Option, kademlia_replication_factor: NonZeroUsize, } @@ -122,8 +122,8 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocol: Vec::new(), - kademlia_legacy_protocol: Vec::new(), + kademlia_protocol: None, + kademlia_legacy_protocol: None, kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) .expect("value is a constant; constant is non-zero; qed."), } @@ -179,8 +179,8 @@ impl DiscoveryConfig { fork_id: Option<&str>, protocol_id: &ProtocolId, ) -> &mut Self { - self.kademlia_protocol = kademlia_protocol_name(genesis_hash, fork_id); - self.kademlia_legacy_protocol = legacy_kademlia_protocol_name(protocol_id); + self.kademlia_protocol = Some(kademlia_protocol_name(genesis_hash, fork_id)); + self.kademlia_legacy_protocol = Some(legacy_kademlia_protocol_name(protocol_id)); self } @@ -213,26 +213,31 @@ impl DiscoveryConfig { kademlia_replication_factor, } = self; - let kademlia = if !kademlia_protocol.is_empty() { + let kademlia = if let Some(ref kademlia_protocol) = kademlia_protocol { let mut config = KademliaConfig::default(); config.set_replication_factor(kademlia_replication_factor); // Populate kad with both the legacy and the new protocol names. 
// Remove the legacy protocol: // https://github.com/paritytech/polkadot-sdk/issues/504 - let kademlia_protocols = [kademlia_protocol.clone(), kademlia_legacy_protocol]; + let kademlia_protocols = if let Some(legacy_protocol) = kademlia_legacy_protocol { + vec![kademlia_protocol.clone(), legacy_protocol] + } else { + vec![kademlia_protocol.clone()] + }; config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); - config.set_record_filtering(libp2p::kad::KademliaStoreInserts::FilterBoth); + config.set_record_filtering(libp2p::kad::StoreInserts::FilterBoth); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. In order to control which peer is added, disable the // auto-insertion and instead add peers manually. - config.set_kbucket_inserts(KademliaBucketInserts::Manual); + config.set_kbucket_inserts(BucketInserts::Manual); config.disjoint_query_paths(kademlia_disjoint_query_paths); let store = MemoryStore::new(local_peer_id); let mut kad = Kademlia::with_config(local_peer_id, store, config); + kad.set_mode(Some(kad::Mode::Server)); for (peer_id, addr) in &permanent_addresses { kad.add_address(peer_id, addr.clone()); @@ -323,7 +328,7 @@ pub struct DiscoveryBehaviour { /// /// Remove when all nodes are upgraded to genesis hash and fork ID-based Kademlia: /// . - kademlia_protocol: Vec, + kademlia_protocol: Option, } impl DiscoveryBehaviour { @@ -369,7 +374,7 @@ impl DiscoveryBehaviour { pub fn add_self_reported_address( &mut self, peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], + supported_protocols: &[StreamProtocol], addr: Multiaddr, ) { if let Some(kademlia) = self.kademlia.as_mut() { @@ -386,10 +391,12 @@ impl DiscoveryBehaviour { // Extract the chain-based Kademlia protocol from `kademlia.protocol_name()` // when all nodes are upgraded to genesis hash and fork ID-based Kademlia: // https://github.com/paritytech/polkadot-sdk/issues/504. - if !supported_protocols - .iter() - .any(|p| p.as_ref() == self.kademlia_protocol.as_slice()) - { + if !supported_protocols.iter().any(|p| { + p == self + .kademlia_protocol + .as_ref() + .expect("kademlia protocol was checked above to be enabled; qed") + }) { trace!( target: "sub-libp2p", "Ignoring self-reported address {} from {} as remote node is not part of the \ @@ -503,7 +510,7 @@ impl DiscoveryBehaviour { #[derive(Debug)] pub enum DiscoveryOut { /// A connection to a peer has been established but the peer has not been - /// added to the routing table because [`KademliaBucketInserts::Manual`] is + /// added to the routing table because [`BucketInserts::Manual`] is /// configured. If the peer is to be included in the routing table, it must /// be explicitly added via /// [`DiscoveryBehaviour::add_self_reported_address`]. @@ -552,8 +559,9 @@ pub enum DiscoveryOut { } impl NetworkBehaviour for DiscoveryBehaviour { - type ConnectionHandler = ToggleConnectionHandler>; - type OutEvent = DiscoveryOut; + type ConnectionHandler = + ToggleConnectionHandler< as NetworkBehaviour>::ConnectionHandler>; + type ToSwarm = DiscoveryOut; fn handle_established_inbound_connection( &mut self, @@ -689,11 +697,11 @@ impl NetworkBehaviour for DiscoveryBehaviour { FromSwarm::ListenerError(e) => { self.kademlia.on_swarm_event(FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => { + FromSwarm::ExternalAddrExpired(e) => { // We intentionally don't remove the element from `known_external_addresses` in // order to not print the log line again. 
- self.kademlia.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); + self.kademlia.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => { self.kademlia.on_swarm_event(FromSwarm::NewListener(e)); @@ -701,8 +709,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { FromSwarm::ExpiredListenAddr(e) => { self.kademlia.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); }, - FromSwarm::NewExternalAddr(e @ NewExternalAddr { addr }) => { - let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into())); + FromSwarm::NewExternalAddrCandidate(e) => { + self.kademlia.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + }, + FromSwarm::AddressChange(e) => { + self.kademlia.on_swarm_event(FromSwarm::AddressChange(e)); + }, + FromSwarm::NewListenAddr(e) => { + self.kademlia.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); + }, + FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { + let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id)); if Self::can_add_to_dht(addr) { // NOTE: we might re-discover the same address multiple times @@ -716,14 +734,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - self.kademlia.on_swarm_event(FromSwarm::NewExternalAddr(e)); - }, - FromSwarm::AddressChange(e) => { - self.kademlia.on_swarm_event(FromSwarm::AddressChange(e)); - }, - FromSwarm::NewListenAddr(e) => { - self.kademlia.on_swarm_event(FromSwarm::NewListenAddr(e)); - self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); }, } } @@ -741,7 +752,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) @@ -963,10 +974,17 @@ impl NetworkBehaviour for DiscoveryBehaviour { ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }), ToSwarm::NotifyHandler { peer_id, handler, event } => return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -979,8 +997,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { continue } - self.pending_events - .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + self.pending_events.extend( + list.into_iter().map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id)), + ); if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) } @@ -990,13 +1009,19 @@ impl NetworkBehaviour for DiscoveryBehaviour { ToSwarm::Dial { .. } => { unreachable!("mDNS never dials!"); }, - ToSwarm::NotifyHandler { event, .. 
} => match event {}, /* `event` is an */ - // enum with no - // variant - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), + // `event` is an enum with no variant + ToSwarm::NotifyHandler { event, .. } => match event {}, ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -1005,21 +1030,24 @@ impl NetworkBehaviour for DiscoveryBehaviour { } /// Legacy (fallback) Kademlia protocol name based on `protocol_id`. -fn legacy_kademlia_protocol_name(id: &ProtocolId) -> Vec { - let mut v = vec![b'/']; - v.extend_from_slice(id.as_ref().as_bytes()); - v.extend_from_slice(b"/kad"); - v +fn legacy_kademlia_protocol_name(id: &ProtocolId) -> StreamProtocol { + let name = format!("/{}/kad", id.as_ref()); + StreamProtocol::try_from_owned(name).expect("protocol name is valid. qed") } /// Kademlia protocol name based on `genesis_hash` and `fork_id`. -fn kademlia_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> Vec { +fn kademlia_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, +) -> StreamProtocol { let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref()); - if let Some(fork_id) = fork_id { - format!("/{}/{}/kad", genesis_hash_hex, fork_id).as_bytes().into() + let name = if let Some(fork_id) = fork_id { + format!("/{genesis_hash_hex}/{fork_id}/kad") } else { - format!("/{}/kad", genesis_hash_hex).as_bytes().into() - } + format!("/{genesis_hash_hex}/kad") + }; + + StreamProtocol::try_from_owned(name).expect("protocol name is valid. qed") } #[cfg(test)] @@ -1036,7 +1064,7 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmBuilder, SwarmEvent}, + swarm::{Executor, Swarm, SwarmEvent}, yamux, Multiaddr, }; use sp_core::hash::H256; @@ -1082,7 +1110,8 @@ mod tests { }; let runtime = tokio::runtime::Runtime::new().unwrap(); - let mut swarm = SwarmBuilder::with_executor( + #[allow(deprecated)] + let mut swarm = libp2p::swarm::SwarmBuilder::with_executor( transport, behaviour, keypair.public().to_peer_id(), diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index d0ccbd8622b885730864d29ac835441cd276d970..b518a2094d7665ce4e44b67fcac99c992f73c08c 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Network event types. These are are not the part of the protocol, but rather +//! Network event types. These are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. 
use crate::types::ProtocolName; diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index ff5f492df246a6e4165700132d080c183b91b79a..6ff05e6af327a275d4377df9b987ccbefcb35da4 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -33,8 +33,9 @@ use litep2p::{ libp2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ - Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent, - KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, + Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, + IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum, + Record, RecordKey, RecordsType, }, ping::{Config as PingConfig, PingEvent}, }, @@ -52,7 +53,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::Duration, + time::{Duration, Instant}, }; /// Logging target for the file. @@ -138,6 +139,15 @@ pub enum DiscoveryEvent { /// Query ID. query_id: QueryId, }, + + /// Incoming record to store. + IncomingRecord { + /// Record. + record: Record, + }, + + /// Started a random Kademlia query. + RandomKademliaStarted, } /// Discovery. @@ -249,6 +259,7 @@ impl Discovery { KademliaConfigBuilder::new() .with_known_peers(known_peers) .with_protocol_names(protocol_names) + .with_incoming_records_validation_mode(IncomingRecordValidationMode::Manual) .build() }; @@ -295,7 +306,7 @@ impl Discovery { ) { if self.local_protocols.is_disjoint(&supported_protocols) { log::trace!( - target: "sub-libp2p", + target: LOG_TARGET, "Ignoring self-reported address of peer {peer} as remote node is not part of the \ Kademlia DHT supported by the local node.", ); @@ -340,6 +351,30 @@ impl Discovery { .await } + /// Store record in the local DHT store. + pub async fn store_record( + &mut self, + key: KademliaKey, + value: Vec, + publisher: Option, + expires: Option, + ) { + log::debug!( + target: LOG_TARGET, + "Storing DHT record with key {key:?}, originally published by {publisher:?}, \ + expires {expires:?}.", + ); + + self.kademlia_handle + .store_record(Record { + key: RecordKey::new(&key.to_vec()), + value, + publisher: publisher.map(Into::into), + expires, + }) + .await; + } + /// Check if the observed address is a known address. 
fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool { let mut known = known.iter(); @@ -424,6 +459,7 @@ impl Stream for Discovery { match this.kademlia_handle.try_find_node(peer) { Ok(query_id) => { this.find_node_query_id = Some(query_id); + return Poll::Ready(Some(DiscoveryEvent::RandomKademliaStarted)) }, Err(()) => { this.duration_to_next_find_query = cmp::min( @@ -481,6 +517,16 @@ impl Stream for Discovery { false => return Poll::Ready(Some(DiscoveryEvent::QueryFailed { query_id })), } }, + Poll::Ready(Some(KademliaEvent::IncomingRecord { record })) => { + log::trace!( + target: LOG_TARGET, + "incoming `PUT_RECORD` request with key {:?} from publisher {:?}", + record.key, + record.publisher, + ); + + return Poll::Ready(Some(DiscoveryEvent::IncomingRecord { record })) + }, } match Pin::new(&mut this.identify_event_stream).poll_next(cx) { diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index ae287052b2d44934a8b4827e53c77cae0d707be9..824f62082cac7c593f74bac21cd66b1bf4fef11c 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -58,7 +58,7 @@ use litep2p::{ protocol::{ libp2p::{ bitswap::Config as BitswapConfig, - kademlia::{QueryId, RecordsType}, + kademlia::{QueryId, Record, RecordsType}, }, request_response::ConfigBuilder as RequestResponseConfigBuilder, }, @@ -369,11 +369,13 @@ impl Litep2pNetworkBackend { .with_websocket(WebSocketTransportConfig { listen_addresses: websocket.into_iter().flatten().map(Into::into).collect(), yamux_config: yamux_config.clone(), + nodelay: true, ..Default::default() }) .with_tcp(TcpTransportConfig { listen_addresses: tcp.into_iter().flatten().map(Into::into).collect(), yamux_config, + nodelay: true, ..Default::default() }) } @@ -698,6 +700,9 @@ impl NetworkBackend for Litep2pNetworkBac let query_id = self.discovery.put_value(key.clone(), value).await; self.pending_put_values.insert(query_id, (key, Instant::now())); } + NetworkServiceCommand::StoreRecord { key, value, publisher, expires } => { + self.discovery.store_record(key, value, publisher.map(Into::into), expires).await; + } NetworkServiceCommand::EventStream { tx } => { self.event_streams.push(tx); } @@ -915,6 +920,22 @@ impl NetworkBackend for Litep2pNetworkBac "ping time with {peer:?}: {rtt:?}", ); } + Some(DiscoveryEvent::IncomingRecord { record: Record { key, value, publisher, expires }} ) => { + self.event_streams.send(Event::Dht( + DhtEvent::PutRecordRequest( + libp2p::kad::RecordKey::new(&key), + value, + publisher.map(Into::into), + expires, + ) + )); + }, + + Some(DiscoveryEvent::RandomKademliaStarted) => { + if let Some(metrics) = self.metrics.as_ref() { + metrics.kademlia_random_queries_total.inc(); + } + } }, event = self.litep2p.next_event() => match event { Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }) => { diff --git a/substrate/client/network/src/litep2p/peerstore.rs b/substrate/client/network/src/litep2p/peerstore.rs index dd377ea09af9b3c9769f06bdba8078a4fbc41a8e..347aa0b90eed511b8ac5bcbee8089617a56eae8f 100644 --- a/substrate/client/network/src/litep2p/peerstore.rs +++ b/substrate/client/network/src/litep2p/peerstore.rs @@ -42,14 +42,20 @@ use std::{ const LOG_TARGET: &str = "sub-libp2p::peerstore"; /// We don't accept nodes whose reputation is under this value. 
-pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); +pub const BANNED_THRESHOLD: i32 = 71 * (i32::MIN / 100); /// Relative decrement of a reputation value that is applied every second. I.e., for inverse -/// decrement of 50 we decrease absolute value of the reputation by 1/50. This corresponds to a -/// factor of `k = 0.98`. It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half, -/// or 34.3 seconds for the values above. In this setup the maximum allowed absolute value of -/// `i32::MAX` becomes 0 in ~1100 seconds (actually less due to integer arithmetic). -const INVERSE_DECREMENT: i32 = 50; +/// decrement of 200 we decrease absolute value of the reputation by 1/200. +/// +/// This corresponds to a factor of `k = 0.995`, where k = 1 - 1 / INVERSE_DECREMENT. +/// +/// It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half, or 138.63 seconds for the +/// values above. +/// +/// In this setup: +/// - `i32::MAX` becomes 0 in exactly 3544 seconds, or approximately 59 minutes +/// - `i32::MIN` escapes the banned threshold in 69 seconds +const INVERSE_DECREMENT: i32 = 200; /// Amount of time between the moment we last updated the [`PeerStore`] entry and the moment we /// remove it, once the reputation value reaches 0. @@ -79,6 +85,11 @@ impl PeerInfo { self.reputation < BANNED_THRESHOLD } + fn add_reputation(&mut self, increment: i32) { + self.reputation = self.reputation.saturating_add(increment); + self.bump_last_updated(); + } + fn decay_reputation(&mut self, seconds_passed: u64) { // Note that decaying the reputation value happens "on its own", // so we don't do `bump_last_updated()`. @@ -97,6 +108,10 @@ impl PeerInfo { } } } + + fn bump_last_updated(&mut self) { + self.last_updated = Instant::now(); + } } #[derive(Debug, Default)] @@ -163,7 +178,7 @@ impl PeerStoreProvider for PeerstoreHandle { match lock.peers.get_mut(&peer) { Some(info) => { - info.reputation = info.reputation.saturating_add(reputation_change.value); + info.add_reputation(reputation_change.value); }, None => { lock.peers.insert( @@ -362,7 +377,7 @@ mod tests { #[test] fn decaying_max_reputation_finally_yields_zero() { const INITIAL_REPUTATION: i32 = i32::MAX; - const SECONDS: u64 = 1000; + const SECONDS: u64 = 3544; let mut peer_info = PeerInfo::default(); peer_info.reputation = INITIAL_REPUTATION; @@ -377,7 +392,7 @@ mod tests { #[test] fn decaying_min_reputation_finally_yields_zero() { const INITIAL_REPUTATION: i32 = i32::MIN; - const SECONDS: u64 = 1000; + const SECONDS: u64 = 3544; let mut peer_info = PeerInfo::default(); peer_info.reputation = INITIAL_REPUTATION; diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 8f36b0828bd39d7f159111cea4713c8d1b04e54c..7d972bbeee5c7591ae6662480a670595e93a866c 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -76,6 +76,21 @@ pub enum NetworkServiceCommand { value: Vec, }, + /// Store record in the local DHT store. + StoreRecord { + /// Record key. + key: KademliaKey, + + /// Record value. + value: Vec, + + /// Original publisher of the record. + publisher: Option, + + /// Record expiration time as measured by a local, monotonic clock. + expires: Option, + }, + /// Query network status. Status { /// `oneshot::Sender` for sending the status.
@@ -240,13 +255,17 @@ impl NetworkDHTProvider for Litep2pNetworkService { fn store_record( &self, - _key: KademliaKey, - _value: Vec, - _publisher: Option, - _expires: Option, + key: KademliaKey, + value: Vec, + publisher: Option, + expires: Option, ) { - // Will be added once litep2p is released with: https://github.com/paritytech/litep2p/pull/135 - log::warn!(target: LOG_TARGET, "Store record is not implemented for litep2p"); + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StoreRecord { + key, + value, + publisher, + expires, + }); } } @@ -264,8 +283,20 @@ impl NetworkStatusProvider for Litep2pNetworkService { async fn network_state(&self) -> Result { Ok(NetworkState { peer_id: self.local_peer_id.to_base58(), - listened_addresses: self.listen_addresses.read().iter().cloned().collect(), - external_addresses: self.external_addresses.read().iter().cloned().collect(), + listened_addresses: self + .listen_addresses + .read() + .iter() + .cloned() + .map(|a| Multiaddr::from(a).into()) + .collect(), + external_addresses: self + .external_addresses + .read() + .iter() + .cloned() + .map(|a| Multiaddr::from(a).into()) + .collect(), connected_peers: HashMap::new(), not_connected_peers: HashMap::new(), // TODO: Check what info we can include here. diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index 2735bd873db91dbcc0a54786e2c64408d6f21133..21eeea6bcc0c30415c646fdd18abb6f5f56ae09d 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -31,14 +31,15 @@ use libp2p::{ Info as IdentifyInfo, }, identity::PublicKey, - ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent, Success as PingSuccess}, + ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent}, swarm::{ behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, - ListenFailure, + AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, + ExternalAddrConfirmed, FromSwarm, ListenFailure, }, - ConnectionDenied, ConnectionHandler, ConnectionId, IntoConnectionHandlerSelect, - NetworkBehaviour, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, + NetworkBehaviour, NewExternalAddrCandidate, PollParameters, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; @@ -47,7 +48,7 @@ use parking_lot::Mutex; use smallvec::SmallVec; use std::{ - collections::{hash_map::Entry, HashSet}, + collections::{hash_map::Entry, HashSet, VecDeque}, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -71,6 +72,8 @@ pub struct PeerInfoBehaviour { garbage_collect: Pin + Send>>, /// Record keeping of external addresses. Data is queried by the `NetworkService`. external_addresses: ExternalAddresses, + /// Pending events to emit to [`Swarm`](libp2p::swarm::Swarm). + pending_actions: VecDeque>>, } /// Information about a node we're connected to. @@ -134,6 +137,7 @@ impl PeerInfoBehaviour { nodes_info: FnvHashMap::default(), garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), external_addresses: ExternalAddresses { addresses: external_addresses }, + pending_actions: Default::default(), } } @@ -148,13 +152,18 @@ impl PeerInfoBehaviour { /// Inserts a ping time in the cache. Has no effect if we don't have any entry for that node, /// which shouldn't happen. 
- fn handle_ping_report(&mut self, peer_id: &PeerId, ping_time: Duration) { - trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer_id, ping_time); + fn handle_ping_report( + &mut self, + peer_id: &PeerId, + ping_time: Duration, + connection: ConnectionId, + ) { + trace!(target: "sub-libp2p", "Ping time with {:?} via {:?}: {:?}", peer_id, connection, ping_time); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.latest_ping = Some(ping_time); } else { error!(target: "sub-libp2p", - "Received ping from node we're not connected to {:?}", peer_id); + "Received ping from node we're not connected to {:?} via {:?}", peer_id, connection); } } @@ -208,11 +217,11 @@ pub enum PeerInfoEvent { } impl NetworkBehaviour for PeerInfoBehaviour { - type ConnectionHandler = IntoConnectionHandlerSelect< + type ConnectionHandler = ConnectionHandlerSelect< ::ConnectionHandler, ::ConnectionHandler, >; - type OutEvent = PeerInfoEvent; + type ToSwarm = PeerInfoEvent; fn handle_pending_inbound_connection( &mut self, @@ -378,9 +387,9 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::ListenerError(e)); self.identify.on_swarm_event(FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => { - self.ping.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); - self.identify.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); + FromSwarm::ExternalAddrExpired(e) => { + self.ping.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); + self.identify.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => { self.ping.on_swarm_event(FromSwarm::NewListener(e)); @@ -391,10 +400,23 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); self.external_addresses.remove(e.addr); }, - FromSwarm::NewExternalAddr(e) => { - self.ping.on_swarm_event(FromSwarm::NewExternalAddr(e)); - self.identify.on_swarm_event(FromSwarm::NewExternalAddr(e)); - self.external_addresses.add(e.addr.clone()); + FromSwarm::NewExternalAddrCandidate(e @ NewExternalAddrCandidate { addr }) => { + self.ping.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + self.identify.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + + // Manually confirm all external address candidates. + // TODO: consider adding [AutoNAT protocol](https://docs.rs/libp2p/0.52.3/libp2p/autonat/index.html) + // (must go through the polkadot protocol spec) or implementing heuristics for + // approving external address candidates. This can be done, for example, by + // approving only addresses reported by multiple peers. + // See also https://github.com/libp2p/rust-libp2p/pull/4721 introduced + // in libp2p v0.53 for a heuristics approach. + self.pending_actions.push_back(ToSwarm::ExternalAddrConfirmed(addr.clone())); + }, + FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { + self.ping.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); + self.identify.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); + self.external_addresses.add(addr.clone()); }, FromSwarm::AddressChange(e @ AddressChange { peer_id, old, new, ..
}) => { self.ping.on_swarm_event(FromSwarm::AddressChange(e)); @@ -437,13 +459,17 @@ impl NetworkBehaviour for PeerInfoBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { + if let Some(event) = self.pending_actions.pop_front() { + return Poll::Ready(event) + } + loop { match self.ping.poll(cx, params) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(ev)) => { - if let PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } = ev { - self.handle_ping_report(&peer, rtt) + if let PingEvent { peer, result: Ok(rtt), connection } = ev { + self.handle_ping_report(&peer, rtt, connection) } }, Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), @@ -453,10 +479,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: Either::Left(event), }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -482,10 +516,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: Either::Right(event), }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 2e57ff1b6a86f7512acd704c755692daeec51a0f..977c4c4de663211a5e1694ada392c3e1164818a3 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -163,9 +163,6 @@ impl Protocol { pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - // Note: no need to remove a peer from `self.peers` if we are dealing with sync - // protocol, because it will be done when handling - // `NotificationsOut::CustomProtocolClosed`. 
self.behaviour.disconnect_peer(peer_id, SetId::from(position)); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") @@ -229,7 +226,7 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol { type ConnectionHandler = ::ConnectionHandler; - type OutEvent = CustomMessageOutcome; + type ToSwarm = CustomMessageOutcome; fn handle_established_inbound_connection( &mut self, @@ -290,17 +287,25 @@ impl NetworkBehaviour for Protocol { &mut self, cx: &mut std::task::Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; let outcome = match event { diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index 03ba437a66726caf6c284fc3863c4aca01118488..cb4f089995e3c6bffce8e6b8460602343d310174 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -1198,7 +1198,7 @@ impl Notifications { impl NetworkBehaviour for Notifications { type ConnectionHandler = NotifsHandler; - type OutEvent = NotificationsOut; + type ToSwarm = NotificationsOut; fn handle_pending_inbound_connection( &mut self, @@ -1678,10 +1678,11 @@ impl NetworkBehaviour for Notifications { FromSwarm::ListenerClosed(_) => {}, FromSwarm::ListenFailure(_) => {}, FromSwarm::ListenerError(_) => {}, - FromSwarm::ExpiredExternalAddr(_) => {}, + FromSwarm::ExternalAddrExpired(_) => {}, FromSwarm::NewListener(_) => {}, FromSwarm::ExpiredListenAddr(_) => {}, - FromSwarm::NewExternalAddr(_) => {}, + FromSwarm::NewExternalAddrCandidate(_) => {}, + FromSwarm::ExternalAddrConfirmed(_) => {}, FromSwarm::AddressChange(_) => {}, FromSwarm::NewListenAddr(_) => {}, } @@ -2239,7 +2240,7 @@ impl NetworkBehaviour for Notifications { &mut self, cx: &mut Context, _params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event) } @@ -2382,7 +2383,6 @@ mod tests { protocol::notifications::handler::tests::*, protocol_controller::{IncomingIndex, ProtoSetConfig, ProtocolController}, }; - use libp2p::swarm::AddressRecord; use sc_utils::mpsc::tracing_unbounded; use std::{collections::HashSet, iter}; @@ -2402,31 +2402,14 @@ mod tests { } #[derive(Clone)] - struct MockPollParams { - peer_id: 
PeerId, - addr: Multiaddr, - } + struct MockPollParams {} impl PollParameters for MockPollParams { type SupportedProtocolsIter = std::vec::IntoIter>; - type ListenedAddressesIter = std::vec::IntoIter; - type ExternalAddressesIter = std::vec::IntoIter; fn supported_protocols(&self) -> Self::SupportedProtocolsIter { vec![].into_iter() } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - vec![self.addr.clone()].into_iter() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - vec![].into_iter() - } - - fn local_peer_id(&self) -> &PeerId { - &self.peer_id - } } fn development_notifs( @@ -3331,7 +3314,7 @@ mod tests { notif.on_swarm_event(FromSwarm::DialFailure(libp2p::swarm::behaviour::DialFailure { peer_id: Some(peer), - error: &libp2p::swarm::DialError::Banned, + error: &libp2p::swarm::DialError::Aborted, connection_id: ConnectionId::new_unchecked(1337), })); @@ -3877,7 +3860,7 @@ mod tests { let now = Instant::now(); notif.on_swarm_event(FromSwarm::DialFailure(libp2p::swarm::behaviour::DialFailure { peer_id: Some(peer), - error: &libp2p::swarm::DialError::Banned, + error: &libp2p::swarm::DialError::Aborted, connection_id: ConnectionId::new_unchecked(0), })); @@ -4003,7 +3986,7 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_some()); if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams { peer_id: PeerId::random(), addr: Multiaddr::empty() }; + let mut params = MockPollParams {}; loop { futures::future::poll_fn(|cx| { @@ -4115,7 +4098,7 @@ mod tests { // verify that the code continues to keep the peer disabled by resetting the timer // after the first one expired. if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams { peer_id: PeerId::random(), addr: Multiaddr::empty() }; + let mut params = MockPollParams {}; loop { futures::future::poll_fn(|cx| { diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index cb09583b73a2d32592fa6b70180779ba52c3de01..967ef614c5560ca5815193bbd597b13e35b2076e 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -75,8 +75,8 @@ use futures::{ use libp2p::{ core::ConnectedPoint, swarm::{ - handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, - NegotiatedSubstream, SubstreamProtocol, + handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, + SubstreamProtocol, }, PeerId, }; @@ -199,7 +199,7 @@ enum State { /// emitted. OpenDesiredByRemote { /// Substream opened by the remote and that hasn't been accepted/rejected yet. - in_substream: NotificationsInSubstream, + in_substream: NotificationsInSubstream, /// See [`State::Closed::pending_opening`]. pending_opening: bool, @@ -212,7 +212,7 @@ enum State { /// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`]. Opening { /// Substream opened by the remote. If `Some`, has been accepted. - in_substream: Option>, + in_substream: Option>, /// Is the connection inbound. inbound: bool, }, @@ -236,14 +236,14 @@ enum State { /// Always `Some` on transition to [`State::Open`]. Switched to `None` only if the remote /// closed the substream. If `None`, a [`NotifsHandlerOut::CloseDesired`] event has been /// emitted. - out_substream: Option>, + out_substream: Option>, /// Substream opened by the remote. 
/// /// Contrary to the `out_substream` field, operations continue as normal even if the /// substream has been closed by the remote. A `None` is treated the same way as if there /// was an idle substream. - in_substream: Option>, + in_substream: Option>, }, } @@ -481,8 +481,8 @@ pub enum NotifsHandlerError { } impl ConnectionHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; + type FromBehaviour = NotifsHandlerIn; + type ToBehaviour = NotifsHandlerOut; type Error = NotifsHandlerError; type InboundProtocol = UpgradeCollec; type OutboundProtocol = NotificationsOut; @@ -517,7 +517,7 @@ impl ConnectionHandler for NotifsHandler { match protocol_info.state { State::Closed { pending_opening } => { - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake: in_substream_open.handshake, @@ -586,7 +586,7 @@ impl ConnectionHandler for NotifsHandler { in_substream: in_substream.take(), }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: new_open.negotiated_fallback, @@ -600,6 +600,8 @@ impl ConnectionHandler for NotifsHandler { } }, ConnectionEvent::AddressChange(_address_change) => {}, + ConnectionEvent::LocalProtocolsChange(_) => {}, + ConnectionEvent::RemoteProtocolsChange(_) => {}, ConnectionEvent::DialUpgradeError(dial_upgrade_error) => match self.protocols [dial_upgrade_error.info] .state @@ -614,7 +616,7 @@ impl ConnectionHandler for NotifsHandler { self.protocols[dial_upgrade_error.info].state = State::Closed { pending_opening: false }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultErr { protocol_index: dial_upgrade_error.info }, )); }, @@ -701,7 +703,7 @@ impl ConnectionHandler for NotifsHandler { self.protocols[protocol_index].state = State::Closed { pending_opening: true }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultErr { protocol_index }, )); }, @@ -711,7 +713,7 @@ impl ConnectionHandler for NotifsHandler { State::Closed { .. } => {}, } - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::CloseResult { protocol_index }, )); }, @@ -726,9 +728,11 @@ impl ConnectionHandler for NotifsHandler { // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote // to express desire to open substreams. + #[allow(deprecated)] KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) } + #[allow(deprecated)] fn poll( &mut self, cx: &mut Context, @@ -736,7 +740,7 @@ impl ConnectionHandler for NotifsHandler { ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::OutEvent, + Self::ToBehaviour, Self::Error, >, > { @@ -755,6 +759,7 @@ impl ConnectionHandler for NotifsHandler { // Only proceed with `out_substream.poll_ready_unpin` if there is an element // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. 
+ #[allow(deprecated)] match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => return Poll::Ready(ConnectionHandlerEvent::Close( @@ -808,7 +813,7 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { *out_substream = None; let event = NotifsHandlerOut::CloseDesired { protocol_index }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) }, }; }, @@ -830,11 +835,14 @@ impl ConnectionHandler for NotifsHandler { State::Opening { in_substream: None, .. } => {}, State::Open { in_substream: in_substream @ Some(_), .. } => - match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { + match futures::prelude::stream::Stream::poll_next( + Pin::new(in_substream.as_mut().unwrap()), + cx, + ) { Poll::Pending => {}, Poll::Ready(Some(Ok(message))) => { let event = NotifsHandlerOut::Notification { protocol_index, message }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) }, Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None, }, @@ -846,7 +854,7 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { self.protocols[protocol_index].state = State::Closed { pending_opening: *pending_opening }; - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::CloseDesired { protocol_index }, )) }, @@ -880,8 +888,8 @@ pub mod tests { use asynchronous_codec::Framed; use libp2p::{ core::muxing::SubstreamBox, - swarm::{handler, ConnectionHandlerUpgrErr}, - Multiaddr, + swarm::handler::{self, StreamUpgradeError}, + Multiaddr, Stream, }; use multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version}; use std::{ @@ -972,6 +980,7 @@ pub mod tests { .await } } + struct MockSubstream { pub rx: mpsc::Receiver>, pub tx: mpsc::Sender>, @@ -991,18 +1000,35 @@ pub mod tests { } /// Create new negotiated substream pair. - pub async fn negotiated() -> (Negotiated, Negotiated) { + pub async fn negotiated() -> (Stream, Stream) { let (socket1, socket2) = Self::new(); let socket1 = SubstreamBox::new(socket1); let socket2 = SubstreamBox::new(socket2); - let protos = vec![b"/echo/1.0.0", b"/echo/2.5.0"]; + let protos = vec!["/echo/1.0.0", "/echo/2.5.0"]; let (res1, res2) = tokio::join!( dialer_select_proto(socket1, protos.clone(), Version::V1), listener_select_proto(socket2, protos), ); - (res1.unwrap().1, res2.unwrap().1) + (Self::stream_new(res1.unwrap().1), Self::stream_new(res2.unwrap().1)) + } + + /// Unsafe substitute for `Stream::new` private constructor. + fn stream_new(stream: Negotiated) -> Stream { + // Static asserts to make sure this doesn't break. 
+ const _: () = { + assert!( + core::mem::size_of::() == + core::mem::size_of::>() + ); + assert!( + core::mem::align_of::() == + core::mem::align_of::>() + ); + }; + + unsafe { core::mem::transmute(stream) } } } @@ -1504,7 +1530,7 @@ pub mod tests { // inject dial failure to an already closed substream and verify outbound state is reset handler.on_connection_event(handler::ConnectionEvent::DialUpgradeError( - handler::DialUpgradeError { info: 0, error: ConnectionHandlerUpgrErr::Timeout }, + handler::DialUpgradeError { info: 0, error: StreamUpgradeError::Timeout }, )); assert!(std::matches!( handler.protocols[0].state, @@ -1574,7 +1600,7 @@ pub mod tests { // inject dial failure to an already closed substream and verify outbound state is reset handler.on_connection_event(handler::ConnectionEvent::DialUpgradeError( - handler::DialUpgradeError { info: 0, error: ConnectionHandlerUpgrErr::Timeout }, + handler::DialUpgradeError { info: 0, error: StreamUpgradeError::Timeout }, )); assert!(std::matches!( handler.protocols[0].state, @@ -1610,6 +1636,7 @@ pub mod tests { notifications_sink.send_sync_notification(vec![1, 3, 3, 9]); notifications_sink.send_sync_notification(vec![1, 3, 4, 0]); + #[allow(deprecated)] futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), @@ -1648,15 +1675,15 @@ pub mod tests { futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Custom( + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0, .. }, )) )); assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Custom(NotifsHandlerOut::CloseDesired { - protocol_index: 0 - },)) + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::CloseDesired { protocol_index: 0 }, + )) )); Poll::Ready(()) }) diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index a72b5b4a6748f0754279cef3f37dd37f601263dd..90c9cc5b7cde3b57f622e5cd04a07344e06a50f8 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -33,9 +33,8 @@ use libp2p::{ core::{transport::MemoryTransport, upgrade, Endpoint}, identity, noise, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, - PollParameters, Swarm, SwarmBuilder, SwarmEvent, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + self, behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, + PollParameters, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, yamux, Multiaddr, PeerId, Transport, }; @@ -141,13 +140,12 @@ fn build_nodes() -> (Swarm, Swarm) { } }); - let mut swarm = SwarmBuilder::with_executor( + let mut swarm = Swarm::new( transport, behaviour, keypairs[index].public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + swarm::Config::with_executor(TokioExecutor(runtime)), + ); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -183,7 +181,7 @@ impl std::ops::DerefMut for CustomProtoWithAddr { impl NetworkBehaviour for CustomProtoWithAddr { type ConnectionHandler = ::ConnectionHandler; - type OutEvent = ::OutEvent; + type ToSwarm = ::ToSwarm; fn handle_pending_inbound_connection( &mut self, @@ -261,7 +259,7 @@ impl NetworkBehaviour for CustomProtoWithAddr { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { 
+ ) -> Poll>> { let _ = self.peer_store_future.poll_unpin(cx); let _ = self.protocol_controller_future.poll_unpin(cx); self.inner.poll(cx, params) diff --git a/substrate/client/network/src/protocol/notifications/upgrade.rs b/substrate/client/network/src/protocol/notifications/upgrade.rs index 8fd837f949d8a10d744513b9225ecf53d8a74d1e..72e0c2d10396232ec53b2e61e0dc5ceb4cb48dac 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade.rs @@ -20,6 +20,7 @@ pub(crate) use self::notifications::{ NotificationsInOpen, NotificationsInSubstreamHandshake, NotificationsOutOpen, }; + pub use self::{ collec::UpgradeCollec, notifications::{ diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs index 33c090ae50e9d87b8976f78fc61e751b474b49ec..ab0f87215cca3e55040dc3ad280fddb29d53254f 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use futures::prelude::*; -use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use libp2p::core::upgrade::{InboundUpgrade, UpgradeInfo}; use std::{ pin::Pin, task::{Context, Poll}, @@ -75,9 +75,9 @@ where #[derive(Debug, Clone, PartialEq)] pub struct ProtoNameWithUsize(T, usize); -impl ProtocolName for ProtoNameWithUsize { - fn protocol_name(&self) -> &[u8] { - self.0.protocol_name() +impl> AsRef for ProtoNameWithUsize { + fn as_ref(&self) -> &str { + self.0.as_ref() } } @@ -103,13 +103,13 @@ impl>, O, E> Future for FutWithUsize { mod tests { use super::*; use crate::types::ProtocolName as ProtoName; - use libp2p::core::upgrade::{ProtocolName, UpgradeInfo}; + use libp2p::core::upgrade::UpgradeInfo; // TODO: move to mocks mockall::mock! { pub ProtocolUpgrade {} - impl UpgradeInfo for ProtocolUpgrade { + impl> UpgradeInfo for ProtocolUpgrade { type Info = T; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> vec::IntoIter; diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index c760b7a963fc156e29165792d7cf145240aa4506..a8a9e453a7bb0c313e95dc1ffee954fa144f4d11 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -513,45 +513,99 @@ pub enum NotificationsOutError { #[cfg(test)] mod tests { + use crate::ProtocolName; + use super::{ - NotificationsIn, NotificationsInOpen, NotificationsOut, NotificationsOutError, - NotificationsOutOpen, + NotificationsHandshakeError, NotificationsIn, NotificationsInOpen, + NotificationsInSubstream, NotificationsOut, NotificationsOutError, NotificationsOutOpen, + NotificationsOutSubstream, }; - use futures::{channel::oneshot, future, prelude::*}; - use libp2p::core::upgrade; + use futures::{channel::oneshot, future, prelude::*, SinkExt, StreamExt}; + use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use std::{pin::Pin, task::Poll}; use tokio::net::{TcpListener, TcpStream}; use tokio_util::compat::TokioAsyncReadCompatExt; + /// Opens a substream to the given address, negotiates the protocol, and returns the substream + /// along with the handshake message. 
+ async fn dial( + addr: std::net::SocketAddr, + handshake: impl Into>, + ) -> Result< + ( + Vec, + NotificationsOutSubstream< + multistream_select::Negotiated>, + >, + ), + NotificationsHandshakeError, + > { + let socket = TcpStream::connect(addr).await.unwrap(); + let notifs_out = NotificationsOut::new("/test/proto/1", Vec::new(), handshake, 1024 * 1024); + let (_, substream) = multistream_select::dialer_select_proto( + socket.compat(), + notifs_out.protocol_info(), + upgrade::Version::V1, + ) + .await + .unwrap(); + let NotificationsOutOpen { handshake, substream, .. } = + >::upgrade_outbound( + notifs_out, + substream, + "/test/proto/1".into(), + ) + .await?; + Ok((handshake, substream)) + } + + /// Listens on a localhost, negotiates the protocol, and returns the substream along with the + /// handshake message. + /// + /// Also sends the listener address through the given channel. + async fn listen_on_localhost( + listener_addr_tx: oneshot::Sender, + ) -> Result< + ( + Vec, + NotificationsInSubstream< + multistream_select::Negotiated>, + >, + ), + NotificationsHandshakeError, + > { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let notifs_in = NotificationsIn::new("/test/proto/1", Vec::new(), 1024 * 1024); + let (_, substream) = + multistream_select::listener_select_proto(socket.compat(), notifs_in.protocol_info()) + .await + .unwrap(); + let NotificationsInOpen { handshake, substream, .. } = + >::upgrade_inbound( + notifs_in, + substream, + "/test/proto/1".into(), + ) + .await?; + Ok((handshake, substream)) + } + #[tokio::test] async fn basic_works() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let (handshake, mut substream) = + dial(listener_addr_rx.await.unwrap(), &b"initial message"[..]).await.unwrap(); assert_eq!(handshake, b"hello world"); substream.send(b"test message".to_vec()).await.unwrap(); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"initial message"); substream.send_handshake(&b"hello world"[..]); @@ -566,33 +620,17 @@ mod tests { async fn empty_handshake() { // Check that everything still works when the handshake messages are empty. - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. 
} = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), vec![], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let (handshake, mut substream) = + dial(listener_addr_rx.await.unwrap(), vec![]).await.unwrap(); assert!(handshake.is_empty()); substream.send(Default::default()).await.unwrap(); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert!(handshake.is_empty()); substream.send_handshake(vec![]); @@ -605,17 +643,10 @@ mod tests { #[tokio::test] async fn refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let outcome = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"hello"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await; + let outcome = dial(listener_addr_rx.await.unwrap(), &b"hello"[..]).await; // Despite the protocol negotiation being successfully conducted on the listener // side, we have to receive an error here because the listener didn't send the @@ -623,17 +654,7 @@ mod tests { assert!(outcome.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); - + let (handshake, substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"hello"); // We successfully upgrade to the protocol, but then close the substream. @@ -644,66 +665,29 @@ mod tests { #[tokio::test] async fn large_initial_message_refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket.compat(), - // We check that an initial message that is too large gets refused. 
- NotificationsOut::new( - PROTO_NAME, - Vec::new(), - (0..32768).map(|_| 0).collect::>(), - 1024 * 1024, - ), - upgrade::Version::V1, - ) - .await; + let ret = + dial(listener_addr_rx.await.unwrap(), (0..32768).map(|_| 0).collect::>()) + .await; assert!(ret.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let ret = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await; - assert!(ret.is_err()); - + let _ret = listen_on_localhost(listener_addr_tx).await; client.await.unwrap(); } #[tokio::test] async fn large_handshake_refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await; + let ret = dial(listener_addr_rx.await.unwrap(), &b"initial message"[..]).await; assert!(ret.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"initial message"); // We check that a handshake that is too large gets refused. @@ -720,10 +704,10 @@ mod tests { let client = tokio::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, .. } = upgrade::apply_outbound( - socket.compat(), + let NotificationsOutOpen { handshake, .. } = OutboundUpgrade::upgrade_outbound( NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); @@ -735,9 +719,10 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), + let NotificationsInOpen { handshake, mut substream, .. } = InboundUpgrade::upgrade_inbound( NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); @@ -758,13 +743,19 @@ mod tests { let client = tokio::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let NotificationsOutOpen { handshake, mut substream, .. 
} = + OutboundUpgrade::upgrade_outbound( + NotificationsOut::new( + PROTO_NAME, + Vec::new(), + &b"initial message"[..], + 1024 * 1024, + ), + socket.compat(), + ProtocolName::Static(PROTO_NAME), + ) + .await + .unwrap(); assert_eq!(handshake, b"hello world"); @@ -786,9 +777,10 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), + let NotificationsInOpen { handshake, mut substream, .. } = InboundUpgrade::upgrade_inbound( NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index fbf050a65713d1d66c361a1cde4e4a698089473d..3671d76ea630b1a83808a2a1ce3b2fa5d3ce4152 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -318,7 +318,6 @@ impl RequestResponsesBehaviour { let mut protocols = HashMap::new(); for protocol in list { let mut cfg = Config::default(); - cfg.set_connection_keep_alive(Duration::from_secs(10)); cfg.set_request_timeout(protocol.request_timeout); let protocol_support = if protocol.inbound_queue.is_some() { @@ -327,13 +326,13 @@ impl RequestResponsesBehaviour { ProtocolSupport::Outbound }; - let rq_rp = Behaviour::new( + let rq_rp = Behaviour::with_codec( GenericCodec { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once(protocol.name.as_bytes().to_vec()) - .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) + iter::once(protocol.name.clone()) + .chain(protocol.fallback_names) .zip(iter::repeat(protocol_support)), cfg, ); @@ -427,7 +426,7 @@ impl RequestResponsesBehaviour { impl NetworkBehaviour for RequestResponsesBehaviour { type ConnectionHandler = MultiHandler as NetworkBehaviour>::ConnectionHandler>; - type OutEvent = Event; + type ToSwarm = Event; fn handle_pending_inbound_connection( &mut self, @@ -543,9 +542,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => + FromSwarm::ExternalAddrExpired(e) => for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredExternalAddr(e)); + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => for (p, _) in self.protocols.values_mut() { @@ -555,9 +554,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredListenAddr(e)); }, - FromSwarm::NewExternalAddr(e) => + FromSwarm::NewExternalAddrCandidate(e) => for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddr(e)); + NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddrCandidate(e)); + }, + FromSwarm::ExternalAddrConfirmed(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrConfirmed(e)); }, FromSwarm::AddressChange(e) => for (p, _) in self.protocols.values_mut() { @@ -592,7 +595,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> 
Poll>> { + ) -> Poll>> { 'poll_all: loop { // Poll to see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { @@ -663,10 +666,18 @@ impl NetworkBehaviour for RequestResponsesBehaviour { handler, event: ((*protocol).to_string(), event), }), - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => + return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; match ev { @@ -950,7 +961,7 @@ pub struct GenericCodec { #[async_trait::async_trait] impl Codec for GenericCodec { - type Protocol = Vec; + type Protocol = ProtocolName; type Request = Vec; type Response = Result, ()>; @@ -1078,7 +1089,7 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmBuilder, SwarmEvent}, + swarm::{Config as SwarmConfig, Executor, Swarm, SwarmEvent}, Multiaddr, }; use std::{iter, time::Duration}; @@ -1104,16 +1115,18 @@ mod tests { let behaviour = RequestResponsesBehaviour::new(list, Arc::new(MockPeerStore {})).unwrap(); let runtime = tokio::runtime::Runtime::new().unwrap(); - let mut swarm = SwarmBuilder::with_executor( + + let mut swarm = Swarm::new( transport, behaviour, keypair.public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + SwarmConfig::with_executor(TokioExecutor(runtime)), + ); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); swarm.listen_on(listen_addr.clone()).unwrap(); + (swarm, listen_addr) } diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 2cf4564e312c689a83b3d4ace09059e6e21547db..3a685787c48e603074ae63323963bbe9ce36a02b 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -61,18 +61,18 @@ use crate::{ use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; -use libp2p::identity::ed25519; #[allow(deprecated)] +use libp2p::swarm::THandlerErr; use libp2p::{ - connection_limits::Exceeded, + connection_limits::{ConnectionLimits, Exceeded}, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, + identity::ed25519, kad::record::Key as KademliaKey, multiaddr::{self, Multiaddr}, - ping::Failure as PingFailure, swarm::{ - AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor, - ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, + Config as SwarmConfig, ConnectionError, ConnectionId, DialError, Executor, ListenError, + NetworkBehaviour, Swarm, SwarmEvent, }, PeerId, }; @@ -274,10 +274,6 @@ where let local_identity: ed25519::Keypair = local_identity.into(); let local_public: ed25519::PublicKey = local_public.into(); let local_peer_id: PeerId = local_peer_id.into(); - let listen_addresses: Vec = - network_config.listen_addresses.iter().cloned().map(Into::into).collect(); - let public_addresses: Vec = - 
network_config.public_addresses.iter().cloned().map(Into::into).collect(); network_config.boot_nodes = network_config .boot_nodes @@ -559,6 +555,11 @@ where request_response_protocols, Arc::clone(&peer_store_handle), external_addresses.clone(), + ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some( + crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, + )), ); match result { @@ -568,37 +569,27 @@ where } }; - let builder = { + let swarm = { struct SpawnImpl(F); impl + Send>>)> Executor for SpawnImpl { fn exec(&self, f: Pin + Send>>) { (self.0)(f) } } - SwarmBuilder::with_executor( - transport, - behaviour, - local_peer_id, - SpawnImpl(params.executor), - ) + + let config = SwarmConfig::with_executor(SpawnImpl(params.executor)) + .with_substream_upgrade_protocol_override(upgrade::Version::V1) + .with_notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) + // NOTE: 24 is somewhat arbitrary and should be tuned in the future if + // necessary. See + .with_per_connection_event_buffer_size(24) + .with_max_negotiating_inbound_streams(2048) + .with_idle_connection_timeout(Duration::from_secs(10)); + + Swarm::new(transport, behaviour, local_peer_id, config) }; - #[allow(deprecated)] - let builder = builder - .connection_limits( - ConnectionLimits::default() - .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) - .with_max_established_incoming(Some( - crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, - )), - ) - .substream_upgrade_protocol_override(upgrade::Version::V1) - .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) - // NOTE: 24 is somewhat arbitrary and should be tuned in the future if necessary. - // See - .per_connection_event_buffer_size(24) - .max_negotiating_inbound_streams(2048); - - (builder.build(), Arc::new(Libp2pBandwidthSink { sink: bandwidth })) + + (swarm, Arc::new(Libp2pBandwidthSink { sink: bandwidth })) }; // Initialize the metrics. @@ -614,19 +605,15 @@ where }; // Listen on multiaddresses. - for addr in &listen_addresses { - if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { + for addr in &network_config.listen_addresses { + if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone().into()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. - for addr in &public_addresses { - Swarm::>::add_external_address( - &mut swarm, - addr.clone(), - AddressScore::Infinite, - ); + for addr in &network_config.public_addresses { + Swarm::>::add_external_address(&mut swarm, addr.clone().into()); } let listen_addresses_set = Arc::new(Mutex::new(HashSet::new())); @@ -807,7 +794,7 @@ where let peer_id = Swarm::>::local_peer_id(swarm).to_base58(); let listened_addresses = swarm.listeners().cloned().collect(); - let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect(); + let external_addresses = swarm.external_addresses().cloned().collect(); NetworkState { peer_id, @@ -867,8 +854,7 @@ impl NetworkService { .into_iter() .map(|mut addr| { let peer = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| "Invalid PeerId format".to_string())?, + Some(multiaddr::Protocol::P2p(peer_id)) => peer_id, _ => return Err("Missing PeerId from address".to_string()), }; @@ -1492,6 +1478,7 @@ where } /// Process the next event coming from `Swarm`. 
+ #[allow(deprecated)] fn handle_swarm_event(&mut self, event: SwarmEvent>>) { match event { SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. }) => { @@ -1586,9 +1573,11 @@ where listen_addrs.truncate(30); } for addr in listen_addrs { - self.network_service - .behaviour_mut() - .add_self_reported_address_to_dht(&peer_id, &protocols, addr); + self.network_service.behaviour_mut().add_self_reported_address_to_dht( + &peer_id, + &protocols, + addr.clone(), + ); } self.peer_store_handle.add_known_peer(peer_id.into()); }, @@ -1705,8 +1694,14 @@ where } } }, - SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established } => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + SwarmEvent::ConnectionClosed { + connection_id, + peer_id, + cause, + endpoint, + num_established, + } => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})"); if let Some(metrics) = self.metrics.as_ref() { let direction = match endpoint { ConnectedPoint::Dialer { .. } => "out", @@ -1715,11 +1710,13 @@ where let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Right(Either::Left(PingFailure::Timeout)), - )))) => "ping-timeout", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Left(NotifsHandlerError::SyncNotificationsClogged), + Either::Left(Either::Right( + NotifsHandlerError::SyncNotificationsClogged, + )), )))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(Either::Left(Either::Left( + Either::Right(Either::Left(_)), + )))) => "ping-timeout", Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", None => "actively-closed", @@ -1746,12 +1743,11 @@ where } self.listen_addresses.lock().remove(&address); }, - SwarmEvent::OutgoingConnectionError { peer_id, error } => { + SwarmEvent::OutgoingConnectionError { connection_id, peer_id, error } => { if let Some(peer_id) = peer_id { trace!( target: "sub-libp2p", - "Libp2p => Failed to reach {:?}: {}", - peer_id, error, + "Libp2p => Failed to reach {peer_id:?} via {connection_id:?}: {error}", ); let not_reported = !self.reported_invalid_boot_nodes.contains(&peer_id); @@ -1789,12 +1785,9 @@ where } else { None }, - DialError::ConnectionLimit(_) => Some("limit-reached"), - DialError::InvalidPeerId(_) | - DialError::WrongPeerId { .. } | - DialError::LocalPeerId { .. } => Some("invalid-peer-id"), + DialError::LocalPeerId { .. } => Some("local-peer-id"), + DialError::WrongPeerId { .. 
} => Some("invalid-peer-id"), DialError::Transport(_) => Some("transport-error"), - DialError::Banned | DialError::NoAddresses | DialError::DialPeerConditionFalse(_) | DialError::Aborted => None, // ignore them @@ -1804,21 +1797,24 @@ where } } }, - SwarmEvent::Dialing(peer_id) => { - trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id) + SwarmEvent::Dialing { connection_id, peer_id } => { + trace!(target: "sub-libp2p", "Libp2p => Dialing({peer_id:?}) via {connection_id:?}") }, - SwarmEvent::IncomingConnection { local_addr, send_back_addr } => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", - local_addr, send_back_addr); + SwarmEvent::IncomingConnection { connection_id, local_addr, send_back_addr } => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))"); if let Some(metrics) = self.metrics.as_ref() { metrics.incoming_connections_total.inc(); } }, - SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error } => { + SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error, + } => { debug!( target: "sub-libp2p", - "Libp2p => IncomingConnectionError({},{}): {}", - local_addr, send_back_addr, error, + "Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}" ); if let Some(metrics) = self.metrics.as_ref() { #[allow(deprecated)] @@ -1829,7 +1825,6 @@ where } else { None }, - ListenError::ConnectionLimit(_) => Some("limit-reached"), ListenError::WrongPeerId { .. } | ListenError::LocalPeerId { .. } => Some("invalid-peer-id"), ListenError::Transport(_) => Some("transport-error"), @@ -1844,17 +1839,6 @@ where } } }, - #[allow(deprecated)] - SwarmEvent::BannedPeer { peer_id, endpoint } => { - debug!( - target: "sub-libp2p", - "Libp2p => BannedPeer({}). Connected via {:?}.", - peer_id, endpoint, - ); - if let Some(metrics) = self.metrics.as_ref() { - metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); - } - }, SwarmEvent::ListenerClosed { reason, addresses, .. } => { if let Some(metrics) = self.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index 4136b34fc0e8e728b4d1a96e3ad5d8517ee4b552..ed7e7c574e16f41ef51d85b41ac041486dde0efd 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -57,7 +57,7 @@ pub fn build_transport( // Main transport: DNS(TCP) let tcp_config = tcp::Config::new().nodelay(true); let tcp_trans = tcp::tokio::Transport::new(tcp_config.clone()); - let dns_init = dns::TokioDnsConfig::system(tcp_trans); + let dns_init = dns::tokio::Transport::system(tcp_trans); Either::Left(if let Ok(dns) = dns_init { // WS + WSS transport @@ -66,7 +66,7 @@ pub fn build_transport( // unresolved addresses (BUT WSS transport itself needs an instance of DNS transport to // resolve and dial addresses). 
let tcp_trans = tcp::tokio::Transport::new(tcp_config); - let dns_for_wss = dns::TokioDnsConfig::system(tcp_trans) + let dns_for_wss = dns::tokio::Transport::system(tcp_trans) .expect("same system_conf & resolver to work"); Either::Left(websocket::WsConfig::new(dns_for_wss).or_transport(dns)) } else { diff --git a/substrate/client/network/src/types.rs b/substrate/client/network/src/types.rs index 25517599469e66294563575f1fbe69cc21240ceb..0652bbcdddecf6b4833f5ae762b8602b2958a56c 100644 --- a/substrate/client/network/src/types.rs +++ b/substrate/client/network/src/types.rs @@ -18,8 +18,6 @@ //! `sc-network` type definitions -use libp2p::core::upgrade; - use std::{ borrow::Borrow, fmt, @@ -94,9 +92,9 @@ impl fmt::Display for ProtocolName { } } -impl upgrade::ProtocolName for ProtocolName { - fn protocol_name(&self) -> &[u8] { - (self as &str).as_bytes() +impl AsRef for ProtocolName { + fn as_ref(&self) -> &str { + self as &str } } diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index 0dfaa491b65c93acb50f44bb002d655043317d99..4cced49fee7913d3163a509e0e32de9d6c978dcf 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -16,17 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-network-common = { path = "../common" } -sc-network-sync = { path = "../sync" } -sc-network-types = { path = "../types" } -sc-network = { path = ".." 
} -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 964090444b22afdeca073ebc25d0be71817f1248..17e3e2119d7e811926a139193c8228ebe2cba1fb 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -16,43 +16,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +libp2p = { workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -prost = "0.12.4" -schnellru = "0.2.1" -smallvec = "1.11.0" +mockall = { workspace = true } +prost = { workspace = true } +schnellru = { workspace = true } +smallvec = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio-stream = "0.1.14" -tokio = { version = "1.32.0", features = ["macros", "time"] } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../../consensus/common" } -sc-network = { path = ".." 
} -sc-network-common = { path = "../common" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-runtime = { path = "../../../primitives/runtime" } +tokio-stream = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -mockall = "0.11.3" -quickcheck = { version = "1.0.3", default-features = false } -sc-block-builder = { path = "../../block-builder" } -sp-test-primitives = { path = "../../../primitives/test-primitives" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +mockall = { workspace = true } +quickcheck = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sp-test-primitives = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index f70e4847f59f32ee7418b370cb60339d3ff12ef7..74b43173508be640a1d828539a71696120f8e2f7 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,28 +16,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tokio = "1.37" -async-trait = "0.1.79" -futures = "0.3.30" -futures-timer = "3.0.1" -libp2p = "0.51.4" +tokio = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +libp2p = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -rand = "0.8.5" -sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../../consensus/common" } -sc-network = { path = ".." 
} -sc-network-common = { path = "../common" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sc-network-light = { path = "../light" } -sc-network-sync = { path = "../sync" } -sc-service = { path = "../../service", default-features = false, features = ["test-helpers"] } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime = { path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 8a8f9608051af0bcb523b405585ad7c0b81b4ae2..221c8515d6d416a691aaf59128299173064af295 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -114,7 +114,7 @@ impl PassThroughVerifier { #[async_trait::async_trait] impl Verifier for PassThroughVerifier { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { if block.fork_choice.is_none() { @@ -210,7 +210,7 @@ impl BlockImport for PeersClient { type Error = ConsensusError; async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.client.check_block(block).await @@ -600,7 +600,7 @@ where type Error = ConsensusError; async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await @@ -622,10 +622,7 @@ struct VerifierAdapter { #[async_trait::async_trait] impl Verifier for VerifierAdapter { - async fn verify( - &mut self, - block: BlockImportParams, - ) -> Result, String> { + async fn verify(&self, block: BlockImportParams) -> Result, String> { let hash = block.header.hash(); self.verifier.lock().await.verify(block).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs index 150c1db7560e6d589e21e9aecb2add06fc57a47c..c4a2b261081e6b101bf4620fd379996b3ae32a69 100644 --- a/substrate/client/network/test/src/service.rs +++ b/substrate/client/network/test/src/service.rs @@ -134,7 +134,7 @@ impl 
TestNetworkBuilder { #[async_trait::async_trait] impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( - &mut self, + &self, mut block: sc_consensus::BlockImportParams, ) -> Result, String> { block.finalized = self.0; diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index d871b59b37bb120783aa2e27a23aa9ee814b3ddf..eb907b606d584bde259f083b63d37067476a5e5b 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -16,16 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-network = { path = ".." } -sc-network-common = { path = "../common" } -sc-network-sync = { path = "../sync" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-consensus = { path = "../../../primitives/consensus/common" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index 3384aab5149dcb5b6127d75e744f010db950b5d1..31ad0781035e5bd0662a617dcb31502048bdea81 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -368,7 +368,8 @@ where { self.on_transactions(peer, m); } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); + warn!(target: "sub-libp2p", "Failed to decode transactions list from peer {peer}"); + self.network.report_peer(peer, rep::BAD_TRANSACTION); } }, } diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index a9334aaa1705987a35322921616188113f011ed8..811ccddbef930d3264c7cc4a8d2310a92b6d714a 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -10,15 +10,16 @@ repository.workspace = true documentation = "https://docs.rs/sc-network-types" [dependencies] -bs58 = "0.5.0" -ed25519-dalek = "2.1" -libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = "0.5.0" -multiaddr = "0.17.0" -multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } -rand = "0.8.5" -thiserror = "1.0.48" -zeroize = { version = "1.7.0", default-features = false } +bs58 = { workspace = true, default-features = true } +ed25519-dalek = { workspace = true, default-features = true } +libp2p-identity = { features = ["ed25519", "peerid", "rand"], workspace = true } +litep2p = { workspace = true 
} +log = { workspace = true, default-features = true } +multiaddr = { workspace = true } +multihash = { workspace = true } +rand = { workspace = true, default-features = true } +thiserror = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -quickcheck = "1.0.3" +quickcheck = { workspace = true, default-features = true } diff --git a/substrate/client/network/types/src/ed25519.rs b/substrate/client/network/types/src/ed25519.rs index e85f405b13066b8be1eed82f8ca783aa62c777d9..acaa01759e5cf791207ad0213b770d875ac26f4d 100644 --- a/substrate/client/network/types/src/ed25519.rs +++ b/substrate/client/network/types/src/ed25519.rs @@ -82,14 +82,14 @@ impl fmt::Debug for Keypair { impl From for Keypair { fn from(kp: litep2p_ed25519::Keypair) -> Self { - Self::try_from_bytes(&mut kp.encode()) + Self::try_from_bytes(&mut kp.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } impl From for litep2p_ed25519::Keypair { fn from(kp: Keypair) -> Self { - Self::decode(&mut kp.to_bytes()) + Self::try_from_bytes(&mut kp.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } @@ -191,14 +191,14 @@ impl PublicKey { impl From for PublicKey { fn from(k: litep2p_ed25519::PublicKey) -> Self { - Self::try_from_bytes(&k.encode()) + Self::try_from_bytes(&k.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } impl From for litep2p_ed25519::PublicKey { fn from(k: PublicKey) -> Self { - Self::decode(&k.to_bytes()) + Self::try_from_bytes(&k.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } @@ -272,7 +272,7 @@ impl From for SecretKey { impl From for litep2p_ed25519::SecretKey { fn from(sk: SecretKey) -> Self { - Self::from_bytes(&mut sk.to_bytes()) + Self::try_from_bytes(&mut sk.to_bytes()) .expect("litep2p `SecretKey` to accept 32 bytes as Ed25519 key") } } @@ -357,10 +357,10 @@ mod tests { let kp1: libp2p_ed25519::Keypair = kp.clone().into(); let kp2: litep2p_ed25519::Keypair = kp.clone().into(); let kp3 = libp2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); - let kp4 = litep2p_ed25519::Keypair::decode(&mut kp_bytes.clone()).unwrap(); + let kp4 = litep2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); assert_eq!(kp_bytes, kp1.to_bytes()); - assert_eq!(kp_bytes, kp2.encode()); + assert_eq!(kp_bytes, kp2.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -389,9 +389,9 @@ mod tests { fn litep2p_kp_to_substrate_kp() { let kp = litep2p_ed25519::Keypair::generate(); let kp1: Keypair = kp.clone().into(); - let kp2 = Keypair::try_from_bytes(&mut kp.encode()).unwrap(); + let kp2 = Keypair::try_from_bytes(&mut kp.to_bytes()).unwrap(); - assert_eq!(kp.encode(), kp1.to_bytes()); + assert_eq!(kp.to_bytes(), kp1.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -439,10 +439,10 @@ mod tests { let pk1: libp2p_ed25519::PublicKey = pk.clone().into(); let pk2: litep2p_ed25519::PublicKey = pk.clone().into(); let pk3 = libp2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); - let pk4 = litep2p_ed25519::PublicKey::decode(&pk_bytes).unwrap(); + let pk4 = litep2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); assert_eq!(pk_bytes, pk1.to_bytes()); - assert_eq!(pk_bytes, pk2.encode()); + assert_eq!(pk_bytes, pk2.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -458,7 +458,7 @@ mod tests { fn litep2p_pk_to_substrate_pk() { let kp = 
litep2p_ed25519::Keypair::generate(); let pk = kp.public(); - let pk_bytes = pk.clone().encode(); + let pk_bytes = pk.clone().to_bytes(); let pk1: PublicKey = pk.clone().into(); let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); @@ -497,7 +497,7 @@ mod tests { let sk1: libp2p_ed25519::SecretKey = sk.clone().into(); let sk2: litep2p_ed25519::SecretKey = sk.clone().into(); let sk3 = libp2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); - let sk4 = litep2p_ed25519::SecretKey::from_bytes(&mut sk_bytes.clone()).unwrap(); + let sk4 = litep2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); let kp: Keypair = sk.into(); let kp1: libp2p_ed25519::Keypair = sk1.into(); diff --git a/substrate/client/network/types/src/multiaddr.rs b/substrate/client/network/types/src/multiaddr.rs index 312bef9baab1254a963d5d886994d0812ea93382..925e24fe70d6d374e82dbee266a625875eecc78e 100644 --- a/substrate/client/network/types/src/multiaddr.rs +++ b/substrate/client/network/types/src/multiaddr.rs @@ -20,8 +20,10 @@ use litep2p::types::multiaddr::{ Error as LiteP2pError, Iter as LiteP2pIter, Multiaddr as LiteP2pMultiaddr, Protocol as LiteP2pProtocol, }; +use multiaddr::Multiaddr as LibP2pMultiaddr; use std::{ fmt::{self, Debug, Display}, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; @@ -102,6 +104,39 @@ impl From for LiteP2pMultiaddr { } } +impl From for Multiaddr { + fn from(multiaddr: LibP2pMultiaddr) -> Self { + multiaddr.into_iter().map(Into::into).collect() + } +} + +impl From for LibP2pMultiaddr { + fn from(multiaddr: Multiaddr) -> Self { + multiaddr.into_iter().map(Into::into).collect() + } +} + +impl From for Multiaddr { + fn from(v: IpAddr) -> Multiaddr { + match v { + IpAddr::V4(a) => a.into(), + IpAddr::V6(a) => a.into(), + } + } +} + +impl From for Multiaddr { + fn from(v: Ipv4Addr) -> Multiaddr { + Protocol::Ip4(v).into() + } +} + +impl From for Multiaddr { + fn from(v: Ipv6Addr) -> Multiaddr { + Protocol::Ip6(v).into() + } +} + impl TryFrom> for Multiaddr { type Error = ParseError; diff --git a/substrate/client/network/types/src/multiaddr/protocol.rs b/substrate/client/network/types/src/multiaddr/protocol.rs index 800d08fe36bd657b855869bcb69a808961e32a5a..aca3a31136860d56d5058301a85ced87fa5ab0f3 100644 --- a/substrate/client/network/types/src/multiaddr/protocol.rs +++ b/substrate/client/network/types/src/multiaddr/protocol.rs @@ -17,12 +17,18 @@ // along with this program. If not, see . use crate::multihash::Multihash; +use libp2p_identity::PeerId; use litep2p::types::multiaddr::Protocol as LiteP2pProtocol; +use multiaddr::Protocol as LibP2pProtocol; use std::{ borrow::Cow, - net::{Ipv4Addr, Ipv6Addr}, + fmt::{self, Debug, Display}, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, }; +// Log target for this file. +const LOG_TARGET: &str = "sub-libp2p"; + /// [`Protocol`] describes all possible multiaddress protocols. 
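The new `From` conversions above let callers build a `Multiaddr` directly from standard library IP types instead of matching on the address family first. A small sketch, assuming the wrapper's existing `Display` implementation and the usual multiaddr text rendering:

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

use sc_network_types::multiaddr::Multiaddr;

fn main() {
    // `From<IpAddr>` (added above) dispatches on the variant, so callers no
    // longer need to branch on V4/V6 before building an address.
    let v4: Multiaddr = IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)).into();
    let v6: Multiaddr = IpAddr::V6(Ipv6Addr::LOCALHOST).into();

    // Expected to render using the standard multiaddr text format.
    assert_eq!(v4.to_string(), "/ip4/10.0.0.1");
    assert_eq!(v6.to_string(), "/ip6/::1");
}
```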
#[derive(PartialEq, Eq, Clone, Debug)] pub enum Protocol<'a> { @@ -60,6 +66,37 @@ pub enum Protocol<'a> { Wss(Cow<'a, str>), } +impl<'a> Display for Protocol<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let protocol = LiteP2pProtocol::from(self.clone()); + Display::fmt(&protocol, f) + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: IpAddr) -> Self { + match addr { + IpAddr::V4(addr) => Protocol::Ip4(addr), + IpAddr::V6(addr) => Protocol::Ip6(addr), + } + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: Ipv4Addr) -> Self { + Protocol::Ip4(addr) + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: Ipv6Addr) -> Self { + Protocol::Ip6(addr) + } +} + impl<'a> From> for Protocol<'a> { fn from(protocol: LiteP2pProtocol<'a>) -> Self { match protocol { @@ -136,3 +173,102 @@ impl<'a> From> for LiteP2pProtocol<'a> { } } } + +impl<'a> From> for Protocol<'a> { + fn from(protocol: LibP2pProtocol<'a>) -> Self { + match protocol { + LibP2pProtocol::Dccp(port) => Protocol::Dccp(port), + LibP2pProtocol::Dns(str) => Protocol::Dns(str), + LibP2pProtocol::Dns4(str) => Protocol::Dns4(str), + LibP2pProtocol::Dns6(str) => Protocol::Dns6(str), + LibP2pProtocol::Dnsaddr(str) => Protocol::Dnsaddr(str), + LibP2pProtocol::Http => Protocol::Http, + LibP2pProtocol::Https => Protocol::Https, + LibP2pProtocol::Ip4(ipv4_addr) => Protocol::Ip4(ipv4_addr), + LibP2pProtocol::Ip6(ipv6_addr) => Protocol::Ip6(ipv6_addr), + LibP2pProtocol::P2pWebRtcDirect => Protocol::P2pWebRtcDirect, + LibP2pProtocol::P2pWebRtcStar => Protocol::P2pWebRtcStar, + LibP2pProtocol::Certhash(multihash) => Protocol::Certhash(multihash.into()), + LibP2pProtocol::P2pWebSocketStar => Protocol::P2pWebSocketStar, + LibP2pProtocol::Memory(port) => Protocol::Memory(port), + LibP2pProtocol::Onion(str, port) => Protocol::Onion(str, port), + LibP2pProtocol::Onion3(addr) => Protocol::Onion3(Cow::Owned(*addr.hash()), addr.port()), + LibP2pProtocol::P2p(peer_id) => Protocol::P2p((*peer_id.as_ref()).into()), + LibP2pProtocol::P2pCircuit => Protocol::P2pCircuit, + LibP2pProtocol::Quic => Protocol::Quic, + LibP2pProtocol::QuicV1 => Protocol::QuicV1, + LibP2pProtocol::Sctp(port) => Protocol::Sctp(port), + LibP2pProtocol::Tcp(port) => Protocol::Tcp(port), + LibP2pProtocol::Tls => Protocol::Tls, + LibP2pProtocol::Noise => Protocol::Noise, + LibP2pProtocol::Udp(port) => Protocol::Udp(port), + LibP2pProtocol::Udt => Protocol::Udt, + LibP2pProtocol::Unix(str) => Protocol::Unix(str), + LibP2pProtocol::Utp => Protocol::Utp, + LibP2pProtocol::Ws(str) => Protocol::Ws(str), + LibP2pProtocol::Wss(str) => Protocol::Wss(str), + protocol => { + log::error!( + target: LOG_TARGET, + "Got unsupported multiaddr protocol '{}'", + protocol.tag(), + ); + // Strictly speaking, this conversion is incorrect. But making protocol conversion + // fallible would significantly complicate the client code. As DCCP transport is not + // used by substrate, this conversion should be safe. + // Also, as of `multiaddr-18.1`, all enum variants are actually covered. 
+ Protocol::Dccp(0) + }, + } + } +} + +impl<'a> From> for LibP2pProtocol<'a> { + fn from(protocol: Protocol<'a>) -> Self { + match protocol { + Protocol::Dccp(port) => LibP2pProtocol::Dccp(port), + Protocol::Dns(str) => LibP2pProtocol::Dns(str), + Protocol::Dns4(str) => LibP2pProtocol::Dns4(str), + Protocol::Dns6(str) => LibP2pProtocol::Dns6(str), + Protocol::Dnsaddr(str) => LibP2pProtocol::Dnsaddr(str), + Protocol::Http => LibP2pProtocol::Http, + Protocol::Https => LibP2pProtocol::Https, + Protocol::Ip4(ipv4_addr) => LibP2pProtocol::Ip4(ipv4_addr), + Protocol::Ip6(ipv6_addr) => LibP2pProtocol::Ip6(ipv6_addr), + Protocol::P2pWebRtcDirect => LibP2pProtocol::P2pWebRtcDirect, + Protocol::P2pWebRtcStar => LibP2pProtocol::P2pWebRtcStar, + // Protocol #280 is called `WebRTC` in multiaddr-17.0 and `WebRTCDirect` in + // multiaddr-18.1. + Protocol::WebRTC => LibP2pProtocol::WebRTCDirect, + Protocol::Certhash(multihash) => LibP2pProtocol::Certhash(multihash.into()), + Protocol::P2pWebSocketStar => LibP2pProtocol::P2pWebSocketStar, + Protocol::Memory(port) => LibP2pProtocol::Memory(port), + Protocol::Onion(str, port) => LibP2pProtocol::Onion(str, port), + Protocol::Onion3(str, port) => LibP2pProtocol::Onion3((str.into_owned(), port).into()), + Protocol::P2p(multihash) => + LibP2pProtocol::P2p(PeerId::from_multihash(multihash.into()).unwrap_or_else(|_| { + // This is better than making conversion fallible and complicating the + // client code. + log::error!( + target: LOG_TARGET, + "Received multiaddr with p2p multihash which is not a valid \ + peer_id. Replacing with random peer_id." + ); + PeerId::random() + })), + Protocol::P2pCircuit => LibP2pProtocol::P2pCircuit, + Protocol::Quic => LibP2pProtocol::Quic, + Protocol::QuicV1 => LibP2pProtocol::QuicV1, + Protocol::Sctp(port) => LibP2pProtocol::Sctp(port), + Protocol::Tcp(port) => LibP2pProtocol::Tcp(port), + Protocol::Tls => LibP2pProtocol::Tls, + Protocol::Noise => LibP2pProtocol::Noise, + Protocol::Udp(port) => LibP2pProtocol::Udp(port), + Protocol::Udt => LibP2pProtocol::Udt, + Protocol::Unix(str) => LibP2pProtocol::Unix(str), + Protocol::Utp => LibP2pProtocol::Utp, + Protocol::Ws(str) => LibP2pProtocol::Ws(str), + Protocol::Wss(str) => LibP2pProtocol::Wss(str), + } + } +} diff --git a/substrate/client/network/types/src/multihash.rs b/substrate/client/network/types/src/multihash.rs index 91f5b6353a7187e72fb49b1731ed1be6d8319bff..321211c598d385a903f3e0b24baa9e6c5c3d2965 100644 --- a/substrate/client/network/types/src/multihash.rs +++ b/substrate/client/network/types/src/multihash.rs @@ -156,22 +156,20 @@ impl From for LiteP2pMultihash { } } -// TODO: uncomment this after upgrading `multihash` crate to v0.19.1. 
-// -// impl From> for Multihash { -// fn from(generic: multihash::MultihashGeneric<64>) -> Self { -// LiteP2pMultihash::wrap(generic.code(), generic.digest()) -// .expect("both have size 64; qed") -// .into() -// } -// } -// -// impl From for multihash::Multihash<64> { -// fn from(multihash: Multihash) -> Self { -// multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest()) -// .expect("both have size 64; qed") -// } -// } +impl From> for Multihash { + fn from(generic: multihash::Multihash<64>) -> Self { + LiteP2pMultihash::wrap(generic.code(), generic.digest()) + .expect("both have size 64; qed") + .into() + } +} + +impl From for multihash::Multihash<64> { + fn from(multihash: Multihash) -> Self { + multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest()) + .expect("both have size 64; qed") + } +} #[cfg(test)] mod tests { diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index 2944ff7f4f49dd879b35a1ba70b214824fbdd089..12e017317b22369ce88b2c0d033166574418dc66 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -16,46 +16,45 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -bytes = "1.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fnv = "1.0.6" -futures = "0.3.30" -futures-timer = "3.0.2" -hyper = { version = "0.14.16", features = ["http2", "stream"] } -hyper-rustls = { version = "0.24.0", features = ["http2"] } -libp2p = "0.51.4" -num_cpus = "1.13" -once_cell = "1.19" -parking_lot = "0.12.1" -rand = "0.8.5" -threadpool = "1.7" -tracing = "0.1.29" -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-types = { path = "../network/types" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-core = { path = "../../primitives/core" } -sp-offchain = { path = "../../primitives/offchain" } -sp-runtime = { path = "../../primitives/runtime" } -sp-keystore = { path = "../../primitives/keystore" } -sp-externalities = { path = "../../primitives/externalities" } +array-bytes = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +hyperv14 = { features = ["http2", "stream"], workspace = true, default-features = true } +hyper-rustls = { features = ["http2"], workspace = true } +num_cpus = { workspace = true } +once_cell = { workspace = true } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +threadpool = { workspace = true } +tracing = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-runtime = { 
workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } log = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1" -lazy_static = "1.4.0" -tokio = "1.37" -sc-block-builder = { path = "../block-builder" } -sc-client-db = { path = "../db", default-features = true } -sc-transaction-pool = { path = "../transaction-pool" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +async-trait = { workspace = true } +lazy_static = { workspace = true } +tokio = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-db = { default-features = true, workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] default = [] diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 46f573341c57948539ef969f6f238923cd2fff22..fda5728b0d03e5dd2445d7f88f85c6987f5bf399 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -27,6 +27,8 @@ //! (i.e.: the socket should continue being processed) in the background even if the runtime isn't //! actively calling any function. +use hyperv14 as hyper; + use crate::api::timestamp; use bytes::buf::{Buf, Reader}; use fnv::FnvHashMap; diff --git a/substrate/client/proposer-metrics/Cargo.toml b/substrate/client/proposer-metrics/Cargo.toml index f560ce2d65e6e6336c7fa372618c858616ca134e..98064049b297fe4039f09f7a525f8159cdb4ecf1 100644 --- a/substrate/client/proposer-metrics/Cargo.toml +++ b/substrate/client/proposer-metrics/Cargo.toml @@ -17,4 +17,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } +prometheus-endpoint = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index d8f833e2b8d45eb217fe6f4f9d835038e6167705..fda81b31ee502f985dd9b0574b7231a2e6e83edb 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-chain-spec = { path = "../chain-spec" } -sc-mixnet = { path = "../mixnet" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-core = { path = "../../primitives/core" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } 
-sp-version = { path = "../../primitives/version" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +sc-chain-spec = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 7837c852a1c9b73992b9a24f911ce7bba937cdab..2f51d42bc15048ac71817259d4d090c69d4515ae 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -16,16 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -forwarded-header-value = "0.1.1" -futures = "0.3.30" -governor = "0.6.0" -http = "0.2.8" -hyper = "0.14.27" -ip_network = "0.4.1" -jsonrpsee = { version = "0.22", features = ["server"] } +forwarded-header-value = { workspace = true } +futures = { workspace = true } +governor = { workspace = true } +http = { workspace = true } +http-body-util = { workspace = true } +ip_network = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } +prometheus-endpoint = { workspace = true, default-features = true } +serde = { workspace = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.22.0", features = ["parking_lot"] } -tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.0", features = ["cors"] } +tokio = { features = ["parking_lot"], workspace = true, default-features = true } +tower = { workspace = true, features = ["util"] } +tower-http = { workspace = true, features = ["cors"] } + +# Dependencies outside the polkadot-sdk workspace +# which requires hyper v1 +hyper = "1.3" diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index ba1fcf5e36771008e139bc69ff492eb645e2bcae..619498b333365ff1fa54672bd6ad94507fcd6d27 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -23,21 +23,16 @@ pub mod middleware; pub mod utils; -use std::{ - convert::Infallible, error::Error as StdError, net::SocketAddr, num::NonZeroU32, time::Duration, -}; +use std::{error::Error as StdError, net::SocketAddr, num::NonZeroU32, sync::Arc, time::Duration}; -use hyper::{ - server::conn::AddrStream, - service::{make_service_fn, service_fn}, -}; use jsonrpsee::{ + core::BoxError, server::{ - middleware::http::ProxyGetRequestLayer, stop_channel, ws, PingConfig, StopHandle, - TowerServiceBuilder, + serve_with_graceful_shutdown, stop_channel, ws, PingConfig, StopHandle, TowerServiceBuilder, }, Methods, RpcModule, }; +use middleware::NodeHealthProxyLayer; use tokio::net::TcpListener; use tower::Service; use utils::{build_rpc_api, format_cors, get_proxy_ip, host_filtering, try_into_cors}; @@ -99,6 +94,7 @@ struct PerConnection { metrics: Option, tokio_handle: tokio::runtime::Handle, service_builder: TowerServiceBuilder, + rate_limit_whitelisted_ips: Arc>, } /// Start RPC server listening on 
given address. @@ -126,14 +122,14 @@ where rate_limit_trust_proxy_headers, } = config; - let std_listener = TcpListener::bind(addrs.as_slice()).await?.into_std()?; - let local_addr = std_listener.local_addr().ok(); + let listener = TcpListener::bind(addrs.as_slice()).await?; + let local_addr = listener.local_addr().ok(); let host_filter = host_filtering(cors.is_some(), local_addr); let http_middleware = tower::ServiceBuilder::new() .option_layer(host_filter) - // Proxy `GET /health` requests to internal `system_health` method. - .layer(ProxyGetRequestLayer::new("/health", "system_health")?) + // Proxy `GET /health, /health/readiness` requests to the internal `system_health` method. + .layer(NodeHealthProxyLayer::default()) .layer(try_into_cors(cors)?); let mut builder = jsonrpsee::server::Server::builder() @@ -163,20 +159,38 @@ where methods: build_rpc_api(rpc_api).into(), service_builder: builder.to_service_builder(), metrics, - tokio_handle, - stop_handle: stop_handle.clone(), + tokio_handle: tokio_handle.clone(), + stop_handle, + rate_limit_whitelisted_ips: Arc::new(rate_limit_whitelisted_ips), }; - let make_service = make_service_fn(move |addr: &AddrStream| { - let cfg = cfg.clone(); - let rate_limit_whitelisted_ips = rate_limit_whitelisted_ips.clone(); - let ip = addr.remote_addr().ip(); - - async move { - let cfg = cfg.clone(); - let rate_limit_whitelisted_ips = rate_limit_whitelisted_ips.clone(); + tokio_handle.spawn(async move { + loop { + let (sock, remote_addr) = tokio::select! { + res = listener.accept() => { + match res { + Ok(s) => s, + Err(e) => { + log::debug!(target: "rpc", "Failed to accept ipv4 connection: {:?}", e); + continue; + } + } + } + _ = cfg.stop_handle.clone().shutdown() => break, + }; + + let ip = remote_addr.ip(); + let cfg2 = cfg.clone(); + let svc = tower::service_fn(move |req: http::Request| { + let PerConnection { + methods, + service_builder, + metrics, + tokio_handle, + stop_handle, + rate_limit_whitelisted_ips, + } = cfg2.clone(); - Ok::<_, Infallible>(service_fn(move |req| { let proxy_ip = if rate_limit_trust_proxy_headers { get_proxy_ip(&req) } else { None }; @@ -193,9 +207,6 @@ where rate_limit }; - let PerConnection { service_builder, metrics, tokio_handle, stop_handle, methods } = - cfg.clone(); - let is_websocket = ws::is_upgrade_request(&req); let transport_label = if is_websocket { "ws" } else { "http" }; @@ -215,7 +226,6 @@ where let rpc_middleware = RpcServiceBuilder::new().option_layer(middleware_layer.clone()); - let mut svc = service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle); @@ -232,17 +242,19 @@ where }); } - svc.call(req).await + // https://github.com/rust-lang/rust/issues/102211 the error type can't be inferred + // to be `Box` so we need to convert it to + // a concrete type as workaround. 
+ svc.call(req).await.map_err(|e| BoxError::from(e)) } - })) - } - }); - - let server = hyper::Server::from_tcp(std_listener)?.serve(make_service); + }); - tokio::spawn(async move { - let graceful = server.with_graceful_shutdown(async move { stop_handle.shutdown().await }); - let _ = graceful.await; + cfg.tokio_handle.spawn(serve_with_graceful_shutdown( + sock, + svc, + cfg.stop_handle.clone().shutdown(), + )); + } }); log::info!( diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index 88ed8b2f433580fa2d87f1a70eacc32019027f2a..0a14be4dacf59f2c3dcae90d64653461fe48eb37 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -32,9 +32,11 @@ use jsonrpsee::{ }; mod metrics; +mod node_health; mod rate_limit; pub use metrics::*; +pub use node_health::*; pub use rate_limit::*; const MAX_JITTER: Duration = Duration::from_millis(50); diff --git a/substrate/client/rpc-servers/src/middleware/node_health.rs b/substrate/client/rpc-servers/src/middleware/node_health.rs new file mode 100644 index 0000000000000000000000000000000000000000..69c9e0829ac98286e8ac4d5e6ddf55772862cd41 --- /dev/null +++ b/substrate/client/rpc-servers/src/middleware/node_health.rs @@ -0,0 +1,203 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Middleware for handling `/health` and `/health/readiness` endpoints. + +use std::{ + error::Error, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures::future::FutureExt; +use http::{HeaderValue, Method, StatusCode, Uri}; +use jsonrpsee::{ + server::{HttpBody, HttpRequest, HttpResponse}, + types::{Response as RpcResponse, ResponseSuccess as RpcResponseSuccess}, +}; +use tower::Service; + +const RPC_SYSTEM_HEALTH_CALL: &str = r#"{"jsonrpc":"2.0","method":"system_health","id":0}"#; +const HEADER_VALUE_JSON: HeaderValue = HeaderValue::from_static("application/json; charset=utf-8"); + +/// Layer that applies [`NodeHealthProxy`] which +/// proxies `/health` and `/health/readiness` endpoints. +#[derive(Debug, Clone, Default)] +pub struct NodeHealthProxyLayer; + +impl tower::Layer for NodeHealthProxyLayer { + type Service = NodeHealthProxy; + + fn layer(&self, service: S) -> Self::Service { + NodeHealthProxy::new(service) + } +} + +/// Middleware that proxies `/health` and `/health/readiness` endpoints. +pub struct NodeHealthProxy(S); + +impl NodeHealthProxy { + /// Creates a new [`NodeHealthProxy`]. 
+ pub fn new(service: S) -> Self { + Self(service) + } +} + +impl tower::Service> for NodeHealthProxy +where + S: Service, + S::Response: 'static, + S::Error: Into> + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = Box; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let mut req = req.map(|body| HttpBody::new(body)); + let maybe_intercept = InterceptRequest::from_http(&req); + + // Modify the request and proxy it to `system_health` + if let InterceptRequest::Health | InterceptRequest::Readiness = maybe_intercept { + // RPC methods are accessed with `POST`. + *req.method_mut() = Method::POST; + // Precautionary remove the URI. + *req.uri_mut() = Uri::from_static("/"); + + // Requests must have the following headers: + req.headers_mut().insert(http::header::CONTENT_TYPE, HEADER_VALUE_JSON); + req.headers_mut().insert(http::header::ACCEPT, HEADER_VALUE_JSON); + + // Adjust the body to reflect the method call. + req = req.map(|_| HttpBody::from(RPC_SYSTEM_HEALTH_CALL)); + } + + // Call the inner service and get a future that resolves to the response. + let fut = self.0.call(req); + + async move { + let res = fut.await.map_err(|err| err.into())?; + + Ok(match maybe_intercept { + InterceptRequest::Deny => + http_response(StatusCode::METHOD_NOT_ALLOWED, HttpBody::empty()), + InterceptRequest::No => res, + InterceptRequest::Health => { + let health = parse_rpc_response(res.into_body()).await?; + http_ok_response(serde_json::to_string(&health)?) + }, + InterceptRequest::Readiness => { + let health = parse_rpc_response(res.into_body()).await?; + if (!health.is_syncing && health.peers > 0) || !health.should_have_peers { + http_ok_response(HttpBody::empty()) + } else { + http_internal_error() + } + }, + }) + } + .boxed() + } +} + +// NOTE: This is duplicated here to avoid dependency to the `RPC API`. +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +struct Health { + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. + pub should_have_peers: bool, +} + +fn http_ok_response>(body: S) -> HttpResponse { + http_response(StatusCode::OK, body) +} + +fn http_response>(status_code: StatusCode, body: S) -> HttpResponse { + HttpResponse::builder() + .status(status_code) + .header(http::header::CONTENT_TYPE, HEADER_VALUE_JSON) + .body(body.into()) + .expect("Header is valid; qed") +} + +fn http_internal_error() -> HttpResponse { + http_response(hyper::StatusCode::INTERNAL_SERVER_ERROR, HttpBody::empty()) +} + +async fn parse_rpc_response( + body: HttpBody, +) -> Result> { + use http_body_util::BodyExt; + + let bytes = body.collect().await?.to_bytes(); + + let raw_rp = serde_json::from_slice::>(&bytes)?; + let rp = RpcResponseSuccess::::try_from(raw_rp)?; + + Ok(rp.result) +} + +/// Whether the request should be treated as ordinary RPC call or be modified. +enum InterceptRequest { + /// Proxy `/health` to `system_health`. + Health, + /// Checks if node has at least one peer and is not doing major syncing. + /// + /// Returns HTTP status code 200 on success otherwise HTTP status code 500 is returned. + Readiness, + /// Treat as a ordinary RPC call and don't modify the request or response. 
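The readiness rule implemented by `NodeHealthProxy` reduces to a single predicate over the `system_health` result. The sketch below restates it outside the middleware so the behaviour of `/health/readiness` is easy to check in isolation; the local `Health` struct and `is_ready` helper are illustrative and not part of this PR.

```rust
// Trimmed mirror of the `Health` struct defined in this file.
struct Health {
    peers: usize,
    is_syncing: bool,
    should_have_peers: bool,
}

/// `GET /health/readiness` answers 200 when this returns `true`, 500 otherwise.
fn is_ready(health: &Health) -> bool {
    (!health.is_syncing && health.peers > 0) || !health.should_have_peers
}

fn main() {
    // A dev node with discovery disabled is ready even with zero peers.
    assert!(is_ready(&Health { peers: 0, is_syncing: false, should_have_peers: false }));
    // A node on a live chain that is still major-syncing is not ready yet.
    assert!(!is_ready(&Health { peers: 5, is_syncing: true, should_have_peers: true }));
}
```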
+ No, + /// Deny health or readiness calls that is not HTTP GET request. + /// + /// Returns HTTP status code 405. + Deny, +} + +impl InterceptRequest { + fn from_http(req: &HttpRequest) -> InterceptRequest { + match req.uri().path() { + "/health" => + if req.method() == http::Method::GET { + InterceptRequest::Health + } else { + InterceptRequest::Deny + }, + "/health/readiness" => + if req.method() == http::Method::GET { + InterceptRequest::Readiness + } else { + InterceptRequest::Deny + }, + // Forward all other requests to the RPC server. + _ => InterceptRequest::No, + } + } +} diff --git a/substrate/client/rpc-servers/src/utils.rs b/substrate/client/rpc-servers/src/utils.rs index d99b8e637d9df3f1322ee8083342f449f958a311..d9d943c7c1fb31fe09ed532cd219a537ebf01b52 100644 --- a/substrate/client/rpc-servers/src/utils.rs +++ b/substrate/client/rpc-servers/src/utils.rs @@ -25,10 +25,7 @@ use std::{ }; use forwarded_header_value::ForwardedHeaderValue; -use hyper::{ - header::{HeaderName, HeaderValue}, - Request, -}; +use http::header::{HeaderName, HeaderValue}; use jsonrpsee::{server::middleware::http::HostFilterLayer, RpcModule}; use tower_http::cors::{AllowOrigin, CorsLayer}; @@ -57,7 +54,7 @@ pub(crate) fn build_rpc_api(mut rpc_api: RpcModule) available_methods.sort(); rpc_api - .register_method("rpc_methods", move |_, _| { + .register_method("rpc_methods", move |_, _, _| { serde_json::json!({ "methods": available_methods, }) @@ -96,7 +93,7 @@ pub(crate) fn format_cors(maybe_cors: Option<&Vec>) -> String { /// 1. `Forwarded` header. /// 2. `X-Forwarded-For` header. /// 3. `X-Real-Ip`. -pub(crate) fn get_proxy_ip(req: &Request) -> Option { +pub(crate) fn get_proxy_ip(req: &http::Request) -> Option { if let Some(ip) = req .headers() .get(&FORWARDED) @@ -133,9 +130,10 @@ pub(crate) fn get_proxy_ip(req: &Request) -> Option { mod tests { use super::*; use hyper::header::HeaderValue; + use jsonrpsee::server::{HttpBody, HttpRequest}; - fn request() -> hyper::Request { - hyper::Request::builder().body(hyper::Body::empty()).unwrap() + fn request() -> http::Request { + HttpRequest::builder().body(HttpBody::empty()).unwrap() } #[test] diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 8977c842d03806c50372fc58452d5c35206ae163..0fcf5fd34e82c674feaccefb99d95a077fc9dcbd 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } # Internal chain structures for "chain_spec". 
-sc-chain-spec = { path = "../chain-spec" } +sc-chain-spec = { workspace = true, default-features = true } # Pool for submitting extrinsics required by "transaction" -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -sp-api = { path = "../../primitives/api" } -sp-rpc = { path = "../../primitives/rpc" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-version = { path = "../../primitives/version" } -sc-client-api = { path = "../api" } -sc-utils = { path = "../utils" } -sc-rpc = { path = "../rpc" } -codec = { package = "parity-scale-codec", version = "3.6.12" } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } serde = { workspace = true, default-features = true } -hex = "0.4" -futures = "0.3.30" -parking_lot = "0.12.1" -tokio-stream = { version = "0.1.14", features = ["sync"] } -tokio = { version = "1.22.0", features = ["sync"] } -array-bytes = "6.2.2" +hex = { workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tokio-stream = { features = ["sync"], workspace = true } +tokio = { features = ["sync"], workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -futures-util = { version = "0.3.30", default-features = false } -rand = "0.8.5" -schnellru = "0.2.1" +futures-util = { workspace = true } +rand = { workspace = true, default-features = true } +schnellru = { workspace = true } [dev-dependencies] -jsonrpsee = { version = "0.22", features = ["server", "ws-client"] } +jsonrpsee = { features = ["server", "ws-client"], workspace = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.22.0", features = ["macros"] } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-transaction-pool = { path = "../../test-utils/runtime/transaction-pool" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-externalities = { path = "../../primitives/externalities" } -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -sc-block-builder = { path = "../block-builder" } -sc-service = { path = "../service", features = ["test-helpers"] } -assert_matches = "1.3.0" -pretty_assertions = "1.2.1" -sc-transaction-pool = { path = "../transaction-pool" } +tokio = { features = ["macros"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } +sp-consensus = { workspace = true, default-features = true } +sp-externalities = { workspace = true, 
default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 23cb0bbf54585383e797a452d9747cac36649ad1..128d803521f6bd9eb2d077d54b604b3269874e14 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -54,7 +54,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_body", raw_method)] + #[method(name = "chainHead_v1_body", with_extensions)] async fn chain_head_unstable_body( &self, follow_subscription: String, @@ -73,7 +73,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_header", raw_method)] + #[method(name = "chainHead_v1_header", with_extensions)] async fn chain_head_unstable_header( &self, follow_subscription: String, @@ -85,7 +85,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_storage", raw_method)] + #[method(name = "chainHead_v1_storage", with_extensions)] async fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -99,7 +99,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_call", raw_method)] + #[method(name = "chainHead_v1_call", with_extensions)] async fn chain_head_unstable_call( &self, follow_subscription: String, @@ -118,7 +118,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_unpin", raw_method)] + #[method(name = "chainHead_v1_unpin", with_extensions)] async fn chain_head_unstable_unpin( &self, follow_subscription: String, @@ -131,7 +131,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_continue", raw_method)] + #[method(name = "chainHead_v1_continue", with_extensions)] async fn chain_head_unstable_continue( &self, follow_subscription: String, @@ -145,7 +145,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_v1_stopOperation", raw_method)] + #[method(name = "chainHead_v1_stopOperation", with_extensions)] async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 6779180a414661eac923afca9a3917b340bc5487..a056b4d437c8dffa491225af40677d12217f9a75 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -36,7 +36,7 @@ use crate::{ use codec::Encode; use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ - core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionDetails, + core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionId, Extensions, MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, }; use log::debug; @@ -251,14 +251,16 @@ where async fn chain_head_unstable_body( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, ) -> ResponsePayload<'static, MethodResponse> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. return ResponsePayload::success(MethodResponse::LimitReached); @@ -335,14 +337,16 @@ where async fn chain_head_unstable_header( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, ) -> Result, ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(None); } @@ -371,16 +375,18 @@ where async fn chain_head_unstable_storage( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, items: Vec>, child_trie: Option, ) -> ResponsePayload<'static, MethodResponse> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. return ResponsePayload::success(MethodResponse::LimitReached); @@ -452,7 +458,7 @@ where async fn chain_head_unstable_call( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, function: String, @@ -463,10 +469,12 @@ where Err(err) => return ResponsePayload::error(err), }; - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. 
return ResponsePayload::success(MethodResponse::LimitReached); @@ -530,14 +538,16 @@ where async fn chain_head_unstable_unpin( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash_or_hashes: ListOrValue, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()); } @@ -566,14 +576,16 @@ where async fn chain_head_unstable_continue( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()) } @@ -592,14 +604,16 @@ where async fn chain_head_unstable_stop_operation( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index a753896b24c238f949a8f4698ea7b4c63e39746f..6dc3df76bdd79ea6ea5c7c0f8765aa8d0c8e9638 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -32,7 +32,7 @@ use futures::{ }; use futures_util::future::Either; use jsonrpsee::SubscriptionSink; -use log::{debug, error}; +use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, }; @@ -572,7 +572,7 @@ where // The information from `.info()` is updated from the DB as the last // step of the finalization and it should be up to date. // If the info is outdated, there is nothing the RPC can do for now. 
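The hunks above replace jsonrpsee's `ConnectionDetails` argument with the per-request `Extensions` that `with_extensions` methods now receive, and read the server-injected `ConnectionId` out of it. A minimal sketch of that pattern, assuming the jsonrpsee version targeted by this PR (which re-exports `ConnectionId`, `Extensions` and `RpcModule` as used in the diff, and accepts the three-argument `register_method` closure shown in the `rpc_methods` hunk); the method name and helper below are invented for illustration:

```rust
use jsonrpsee::{ConnectionId, Extensions, RpcModule};

/// Pull the connection id out of the per-request `Extensions`
/// that the jsonrpsee server injects for `with_extensions` methods.
fn connection_id(ext: &Extensions) -> ConnectionId {
	ext.get::<ConnectionId>()
		.copied()
		.expect("ConnectionId is always set by jsonrpsee; qed")
}

/// Register a toy method whose handler receives the extra `Extensions` argument.
fn register_example(module: &mut RpcModule<()>) {
	module
		.register_method("example_connectionId", |_params, _ctx, ext| {
			// Echo the numeric connection id back to the caller.
			serde_json::json!({ "connection_id": connection_id(ext).0 })
		})
		.expect("example_connectionId is only registered once; qed");
}
```

The same lookup is what the `chainHead_v1_*` and `transaction_v1_*` handlers above rely on to keep subscriptions and broadcast operations scoped to a single connection.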
- error!( + debug!( target: LOG_TARGET, "[follow][id={:?}] Client does not contain different best block", self.sub_id, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index a6edc344bc63fda4cb04caa21dfd20b2df8bd0ad..d4d616f54dc88c9909cb8bfadc720da01afd2030 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -846,6 +846,7 @@ impl> SubscriptionsInner { #[cfg(test)] mod tests { use super::*; + use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; use sc_service::client::new_in_mem; use sp_consensus::BlockOrigin; @@ -1420,17 +1421,20 @@ mod tests { rpc_connections.clone(), ); - let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); - let mut reserved_sub_second = subscription_management.reserve_subscription(1).unwrap(); + let reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); + let mut reserved_sub_second = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Subscriptions reserved but not yet populated. assert_eq!(subs.read().subs.len(), 0); // Cannot reserve anymore. - assert!(subscription_management.reserve_subscription(1).is_none()); + assert!(subscription_management.reserve_subscription(ConnectionId(1)).is_none()); // Drop the first subscription. drop(reserved_sub_first); // Space is freed-up for the rpc connections. - let mut reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let mut reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Insert subscriptions. let _sub_data_first = @@ -1445,7 +1449,8 @@ mod tests { // Check that the subscription is removed. assert_eq!(subs.read().subs.len(), 1); // Space is freed-up for the rpc connections. - let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Drop all subscriptions. drop(reserved_sub_first); diff --git a/substrate/client/rpc-spec-v2/src/common/connections.rs b/substrate/client/rpc-spec-v2/src/common/connections.rs index c16a80bf49db93853ca223ab632dbc544082ae44..f0c31d612ebdb1a55b6bf97f8cda7f8fdccffd87 100644 --- a/substrate/client/rpc-spec-v2/src/common/connections.rs +++ b/substrate/client/rpc-spec-v2/src/common/connections.rs @@ -195,68 +195,71 @@ mod tests { #[test] fn reserve_space() { let rpc_connections = RpcConnections::new(2); - let reserved = rpc_connections.reserve_space(1); + let conn_id = ConnectionId(1); + let reserved = rpc_connections.reserve_space(conn_id); + assert!(reserved.is_some()); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); assert_eq!(rpc_connections.data.lock().len(), 1); let reserved = reserved.unwrap(); let registered = reserved.register("identifier1".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier1")); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); drop(registered); // Data is dropped. 
- assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().get(&conn_id).is_none()); assert!(rpc_connections.data.lock().is_empty()); // Checks can still happen. - assert!(!rpc_connections.contains_identifier(1, "identifier1")); + assert!(!rpc_connections.contains_identifier(conn_id, "identifier1")); } #[test] fn reserve_space_capacity_reached() { let rpc_connections = RpcConnections::new(2); + let conn_id = ConnectionId(1); // Reserve identifier for connection 1. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Add identifier for connection 1. let reserved = reserved.unwrap(); let registered = reserved.register("identifier1".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier1")); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Reserve identifier for connection 1 again. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Add identifier for connection 1 again. let reserved = reserved.unwrap(); let registered_second = reserved.register("identifier2".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier2")); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier2")); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Cannot reserve more identifiers. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_none()); // Drop the first identifier. drop(registered); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); - assert!(rpc_connections.contains_identifier(1, "identifier2")); - assert!(!rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier2")); + assert!(!rpc_connections.contains_identifier(conn_id, "identifier1")); // Can reserve again after clearing the space. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Ensure data is cleared. 
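These tests exercise the per-connection cap enforced by `RpcConnections`: each `ConnectionId` may hold at most a fixed number of registered operation identifiers, and dropping a registration frees the slot again. A hypothetical, stripped-down model of that bookkeeping (all names below are invented for illustration and are not the crate's API):

```rust
use std::collections::HashMap;

/// Toy bookkeeping: connection id -> registered operation identifiers.
struct ConnectionLimits {
	capacity: usize,
	active: HashMap<usize, Vec<String>>,
}

impl ConnectionLimits {
	fn new(capacity: usize) -> Self {
		Self { capacity, active: HashMap::new() }
	}

	/// Returns `false` once a connection already holds `capacity` identifiers.
	fn try_register(&mut self, conn_id: usize, operation_id: &str) -> bool {
		let ops = self.active.entry(conn_id).or_default();
		if ops.len() >= self.capacity {
			return false
		}
		ops.push(operation_id.to_string());
		true
	}

	/// Unregistering an identifier frees space for the connection again.
	fn unregister(&mut self, conn_id: usize, operation_id: &str) {
		if let Some(ops) = self.active.get_mut(&conn_id) {
			ops.retain(|id| id != operation_id);
			if ops.is_empty() {
				self.active.remove(&conn_id);
			}
		}
	}
}

fn main() {
	let mut limits = ConnectionLimits::new(2);
	assert!(limits.try_register(1, "identifier1"));
	assert!(limits.try_register(1, "identifier2"));
	// Mirrors `reserve_space_capacity_reached`: the third reservation is rejected.
	assert!(!limits.try_register(1, "identifier3"));
	limits.unregister(1, "identifier1");
	assert!(limits.try_register(1, "identifier3"));
}
```

In the real type the freeing happens automatically when the RAII guards returned by `reserve_space`/`register` are dropped, which is exactly what the `drop(...)` calls in the tests above verify.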
drop(reserved); drop(registered_second); - assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().get(&conn_id).is_none()); } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index ed358922d53ed0d087a2693b55a9b4f51635c1c0..6af6f1678440afe6bf01805971903cdf5ffe8e45 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -48,7 +48,7 @@ pub trait TransactionBroadcastApi { /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_v1_broadcast", raw_method)] + #[method(name = "transaction_v1_broadcast", with_extensions)] async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. @@ -56,6 +56,6 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_v1_stop", raw_method)] + #[method(name = "transaction_v1_stop", with_extensions)] async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 68c19010e31c5653509c380f6be69c072e6fe9bb..2fd4ce2454565dfaa4f25c607b1fed83650a7f45 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -27,7 +27,7 @@ use futures::{FutureExt, Stream, StreamExt}; use futures_util::stream::AbortHandle; use jsonrpsee::{ core::{async_trait, RpcResult}, - ConnectionDetails, + ConnectionId, Extensions, }; use parking_lot::RwLock; use rand::{distributions::Alphanumeric, Rng}; @@ -121,19 +121,18 @@ where ::Hash: Unpin, Client: HeaderBackend + BlockchainEvents + Send + Sync + 'static, { - async fn broadcast( - &self, - connection_details: ConnectionDetails, - bytes: Bytes, - ) -> RpcResult> { + async fn broadcast(&self, ext: &Extensions, bytes: Bytes) -> RpcResult> { let pool = self.pool.clone(); + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); // The unique ID of this operation. let id = self.generate_unique_id(); // Ensure that the connection has not reached the maximum number of active operations. - let Some(reserved_connection) = self.rpc_connections.reserve_space(connection_details.id()) - else { + let Some(reserved_connection) = self.rpc_connections.reserve_space(conn_id) else { return Ok(None) }; let Some(reserved_identifier) = reserved_connection.register(id.clone()) else { @@ -245,11 +244,16 @@ where async fn stop_broadcast( &self, - connection_details: ConnectionDetails, + ext: &Extensions, operation_id: String, ) -> Result<(), ErrorBroadcast> { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + // The operation ID must correlate to the same connection ID. 
- if !self.rpc_connections.contains_identifier(connection_details.id(), &operation_id) { + if !self.rpc_connections.contains_identifier(conn_id, &operation_id) { return Err(ErrorBroadcast::InvalidOperationID) } diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index 7dd46b2ab4c31e7be4168e588a8c95e900cb213a..545d02bb3043868addece16ad24549aa9c1f8a1d 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["server"] } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-block-builder = { path = "../block-builder" } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sc-mixnet = { path = "../mixnet" } -sc-rpc-api = { path = "../rpc-api" } -sc-tracing = { path = "../tracing" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-offchain = { path = "../../primitives/offchain" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } -sp-session = { path = "../../primitives/session" } -sp-version = { path = "../../primitives/version" } -sp-statement-store = { path = "../../primitives/statement-store" } -tokio = "1.37" +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } [dev-dependencies] -env_logger = "0.11" -assert_matches = "1.3.0" -sc-block-builder = { path = "../block-builder" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-transaction-pool = { path = "../transaction-pool" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -tokio = "1.37" -sp-io = { path = "../../primitives/io" } -substrate-test-runtime-client 
= { path = "../../test-utils/runtime/client" } -pretty_assertions = "1.2.1" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } +env_logger = { workspace = true } +assert_matches = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +pretty_assertions = { workspace = true } +tracing-subscriber = { features = ["env-filter"], workspace = true } [features] test-helpers = [] diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index 3b5372615e733dba860faa453063548189fc6354..bc566ed37f2301c4a077f4918a2d4e521153a83f 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -130,7 +130,7 @@ async fn inner_pipe_from_stream( "Subscription buffer limit={} exceeded for subscription={} conn_id={}; dropping subscription", buf.max_cap, sink.method_name(), - sink.connection_id() + sink.connection_id().0 ); return } @@ -189,7 +189,7 @@ mod tests { async fn subscribe() -> Subscription { let mut module = RpcModule::new(()); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, _| async move { + .register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move { let stream = futures::stream::iter([0; 16]); pipe_from_stream(pending, stream).await; Ok(()) @@ -217,7 +217,7 @@ mod tests { let mut module = RpcModule::new(tx); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, ctx| async move { + .register_subscription("sub", "my_sub", "unsub", |_, pending, ctx, _| async move { let stream = futures::stream::iter([0; 32]); pipe_from_stream(pending, stream).await; _ = ctx.unbounded_send(()); @@ -239,16 +239,21 @@ mod tests { let mut module = RpcModule::new(notify_tx); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, notify_tx| async move { - // emulate empty stream for simplicity: otherwise we need some mechanism - // to sync buffer and channel send operations - let stream = futures::stream::empty::<()>(); - // this should exit immediately - pipe_from_stream(pending, stream).await; - // notify that the `pipe_from_stream` has returned - notify_tx.notify_one(); - Ok(()) - }) + .register_subscription( + "sub", + "my_sub", + "unsub", + |_, pending, notify_tx, _| async move { + // emulate empty stream for simplicity: otherwise we need some mechanism + // to sync buffer and channel send operations + let stream = futures::stream::empty::<()>(); + // this should exit immediately + pipe_from_stream(pending, stream).await; + // notify that the `pipe_from_stream` has returned + notify_tx.notify_one(); + Ok(()) + }, + ) .unwrap(); module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap(); diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index dfdd485f15c00550119d9aebf8594b33cdba4df1..724e2ddfe56a539c0263e4bcf555777d1e81802e 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -28,64 +28,64 @@ runtime-benchmarks = [ ] [dependencies] -jsonrpsee = { version = "0.22", features = 
["server"] } +jsonrpsee = { features = ["server"], workspace = true } thiserror = { workspace = true } -futures = "0.3.30" -rand = "0.8.5" -parking_lot = "0.12.1" +futures = { workspace = true } +rand = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -futures-timer = "3.0.1" -exit-future = "0.2.0" -pin-project = "1.0.12" +futures-timer = { workspace = true } +exit-future = { workspace = true } +pin-project = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-keystore = { path = "../keystore" } -sp-runtime = { path = "../../primitives/runtime" } -sp-trie = { path = "../../primitives/trie" } -sp-externalities = { path = "../../primitives/externalities" } -sc-utils = { path = "../utils" } -sp-version = { path = "../../primitives/version" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-session = { path = "../../primitives/session" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-consensus = { path = "../../primitives/consensus/common" } -sc-consensus = { path = "../consensus/common" } -sp-storage = { path = "../../primitives/storage" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-light = { path = "../network/light" } -sc-network-sync = { path = "../network/sync" } -sc-network-types = { path = "../network/types" } -sc-network-transactions = { path = "../network/transactions" } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sp-api = { path = "../../primitives/api" } -sc-client-db = { path = "../db", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sc-executor = { path = "../executor" } -sc-transaction-pool = { path = "../transaction-pool" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof" } -sc-rpc-server = { path = "../rpc-servers" } -sc-rpc = { path = "../rpc" } -sc-rpc-spec-v2 = { path = "../rpc-spec-v2" } -sc-informant = { path = "../informant" } -sc-telemetry = { path = "../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-tracing = { path = "../tracing" } -sc-sysinfo = { path = "../sysinfo" } -tracing = "0.1.29" -tracing-futures = { version = "0.2.4" } -async-trait = "0.1.79" -tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } -tempfile = "3.1.0" -directories = "5.0.1" -static_init = "1.0.3" -schnellru = "0.2.1" +sc-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features 
= true } +sp-consensus = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +codec = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-transaction-storage-proof = { workspace = true, default-features = true } +sc-rpc-server = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-informant = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +tracing-futures = { workspace = true } +async-trait = { workspace = true } +tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } +tempfile = { workspace = true } +directories = { workspace = true } +static_init = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime = { path = "../../test-utils/runtime" } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 9da4d2192576909f95428933623a1e3160e1f9ad..1341aa0e72051f89bcd51d2feab3c24051fc124e 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -16,19 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use super::{code_provider::CodeProvider, ClientConfig}; use sc_client_api::{ backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend, }; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::ProofRecorder; -use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode}; +use sp_core::traits::{CallContext, CodeExecutor}; use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashingFor}, }; -use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof}; +use sp_state_machine::{backend::AsTrieBackend, OverlayedChanges, StateMachine, StorageProof}; use std::{cell::RefCell, sync::Arc}; /// Call executor that executes methods locally, querying all required @@ -36,8 +36,7 @@ use std::{cell::RefCell, sync::Arc}; pub struct LocalCallExecutor { backend: Arc, executor: E, - wasm_override: Arc>, - wasm_substitutes: WasmSubstitutes, + code_provider: CodeProvider, execution_extensions: Arc>, } @@ -53,81 +52,15 @@ where client_config: ClientConfig, execution_extensions: ExecutionExtensions, ) -> sp_blockchain::Result { - let wasm_override = client_config - .wasm_runtime_overrides - .as_ref() - .map(|p| WasmOverride::new(p.clone(), &executor)) - .transpose()?; - - let wasm_substitutes = WasmSubstitutes::new( - client_config.wasm_runtime_substitutes, - executor.clone(), - backend.clone(), - )?; + let code_provider = CodeProvider::new(&client_config, executor.clone(), backend.clone())?; Ok(LocalCallExecutor { backend, executor, - wasm_override: Arc::new(wasm_override), - wasm_substitutes, + code_provider, execution_extensions: Arc::new(execution_extensions), }) } - - /// Check if local runtime code overrides are enabled and one is available - /// for the given `BlockId`. If yes, return it; otherwise return the same - /// `RuntimeCode` instance that was passed. - fn check_override<'a>( - &'a self, - onchain_code: RuntimeCode<'a>, - state: &B::State, - hash: Block::Hash, - ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> - where - Block: BlockT, - B: backend::Backend, - { - let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; - let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { - o.get( - &on_chain_version.spec_version, - onchain_code.heap_pages, - &on_chain_version.spec_name, - ) - }) { - log::debug!(target: "wasm_overrides", "using WASM override for block {}", hash); - d - } else if let Some(s) = - self.wasm_substitutes - .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) - { - log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", hash); - s - } else { - log::debug!( - target: "wasm_overrides", - "Neither WASM override nor substitute available for block {hash}, using onchain code", - ); - (onchain_code, on_chain_version) - }; - - Ok(code_and_version) - } - - /// Returns the on chain runtime version. 
- fn on_chain_runtime_version( - &self, - code: &RuntimeCode, - state: &B::State, - ) -> sp_blockchain::Result { - let mut overlay = OverlayedChanges::default(); - - let mut ext = Ext::new(&mut overlay, state, None); - - self.executor - .runtime_version(&mut ext, code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) - } } impl Clone for LocalCallExecutor @@ -138,8 +71,7 @@ where LocalCallExecutor { backend: self.backend.clone(), executor: self.executor.clone(), - wasm_override: self.wasm_override.clone(), - wasm_substitutes: self.wasm_substitutes.clone(), + code_provider: self.code_provider.clone(), execution_extensions: self.execution_extensions.clone(), } } @@ -175,7 +107,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = self.execution_extensions.extensions(at_hash, at_number); @@ -215,7 +147,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = extensions.borrow_mut(); match recorder { @@ -263,7 +195,9 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - self.check_override(runtime_code, &state, at_hash).map(|(_, v)| v) + self.code_provider + .maybe_override_code(runtime_code, &state, at_hash) + .map(|(_, v)| v) } fn prove_execution( @@ -281,7 +215,7 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; sp_state_machine::prove_execution_on_trie_backend( trie_backend, @@ -331,133 +265,3 @@ where self.executor.native_version() } } - -#[cfg(test)] -mod tests { - use super::*; - use backend::Backend; - use sc_client_api::in_mem; - use sc_executor::WasmExecutor; - use sp_core::{ - testing::TaskExecutor, - traits::{FetchRuntimeCode, WrappedRuntimeCode}, - }; - use std::collections::HashMap; - use substrate_test_runtime_client::{runtime, GenesisInit}; - - #[test] - fn should_get_override_if_exists() { - let executor = WasmExecutor::default(); - - let overrides = crate::client::wasm_override::dummy_overrides(); - let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); - let onchain_code = RuntimeCode { - code_fetcher: &onchain_code, - heap_pages: Some(128), - hash: vec![0, 0, 0, 0], - }; - - let backend = Arc::new(in_mem::Backend::::new()); - - // wasm_runtime_overrides is `None` here because we construct the - // LocalCallExecutor directly later on - let client_config = ClientConfig::default(); - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. 
- let _client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let call_executor = LocalCallExecutor { - backend: backend.clone(), - executor: executor.clone(), - wasm_override: Arc::new(Some(overrides)), - wasm_substitutes: WasmSubstitutes::new( - Default::default(), - executor.clone(), - backend.clone(), - ) - .unwrap(), - execution_extensions: Arc::new(ExecutionExtensions::new( - None, - Arc::new(executor.clone()), - )), - }; - - let check = call_executor - .check_override( - onchain_code, - &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), - backend.blockchain().info().genesis_hash, - ) - .expect("RuntimeCode override") - .0; - - assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); - } - - #[test] - fn returns_runtime_version_from_substitute() { - const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; - - let executor = WasmExecutor::default(); - - let backend = Arc::new(in_mem::Backend::::new()); - - // Let's only override the `spec_name` for our testing purposes. - let substitute = sp_version::embed::embed_runtime_version( - &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), - sp_version::RuntimeVersion { - spec_name: SUBSTITUTE_SPEC_NAME.into(), - ..substrate_test_runtime::VERSION - }, - ) - .unwrap(); - - let client_config = crate::client::ClientConfig { - wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), - ..Default::default() - }; - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. - let client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); - - assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); - } -} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 3c25c233775bea6ed8297d144bcadf823b918e7e..a2c9212f7b9c9ce80860aa9ca9f3b9e8b0a844d5 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -18,7 +18,10 @@ //! 
Substrate Client -use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; +use super::{ + block_rules::{BlockRules, LookupResult as BlockLookupResult}, + CodeProvider, +}; use crate::client::notification_pinning::NotificationPinningWorker; use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; @@ -57,10 +60,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ - storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, - StorageKey, - }, + storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey}, traits::{CallContext, SpawnNamed}, }; use sp_runtime::{ @@ -115,6 +115,7 @@ where config: ClientConfig, telemetry: Option, unpin_worker_sender: TracingUnboundedSender>, + code_provider: CodeProvider, _phantom: PhantomData, } @@ -410,6 +411,7 @@ where Block, BlockImportOperation = >::BlockImportOperation, >, + E: Clone, B: 'static, { let info = backend.blockchain().info(); @@ -438,6 +440,7 @@ where ); let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); + let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?; Ok(Client { backend, @@ -453,6 +456,7 @@ where config, telemetry, unpin_worker_sender, + code_provider, _phantom: Default::default(), }) } @@ -475,13 +479,10 @@ where } /// Get the code at a given block. + /// + /// This takes any potential substitutes into account, but ignores overrides. pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result> { - Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? - .expect( - "None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed", - ) - .0) + self.code_provider.code_at_ignoring_overrides(hash) } /// Get the RuntimeVersion at a given block. @@ -1779,7 +1780,7 @@ where /// Check block preconditions. async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { let BlockCheckParams { @@ -1861,10 +1862,10 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { - (&*self).check_block(block).await + (&self).check_block(block).await } } diff --git a/substrate/client/service/src/client/code_provider.rs b/substrate/client/service/src/client/code_provider.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ba7766ea65b5ba0a723aaa151d7121e2bbc4c53 --- /dev/null +++ b/substrate/client/service/src/client/code_provider.rs @@ -0,0 +1,348 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
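The new `code_provider.rs` added below centralizes the runtime-code selection that `LocalCallExecutor::check_override` used to perform. The precedence it implements is: a local WASM override wins unless overrides are explicitly ignored, then a registered substitute for the on-chain `spec_version`, then the on-chain `:code` itself. A small, self-contained sketch of just that decision (the enum and function are invented here for illustration and are not part of the PR):

```rust
/// Which source the runtime code would be taken from.
#[derive(Debug, PartialEq)]
enum CodeSource {
	Override,
	Substitute,
	OnChain,
}

/// Mirror of the if/else chain in `maybe_override_code_internal`:
/// overrides are consulted first (unless ignored), then substitutes,
/// and the on-chain code is the fallback.
fn pick_code_source(has_override: bool, has_substitute: bool, ignore_overrides: bool) -> CodeSource {
	if has_override && !ignore_overrides {
		CodeSource::Override
	} else if has_substitute {
		CodeSource::Substitute
	} else {
		CodeSource::OnChain
	}
}

fn main() {
	// `code_at_ignoring_overrides` corresponds to `ignore_overrides = true`:
	// an override on disk is skipped, but a substitute still applies.
	assert_eq!(pick_code_source(true, false, true), CodeSource::OnChain);
	assert_eq!(pick_code_source(true, true, true), CodeSource::Substitute);
	// The call-executor path (`ignore_overrides = false`) prefers the override.
	assert_eq!(pick_code_source(true, true, false), CodeSource::Override);
}
```

This is why `Client::code_at` can keep honouring substitutes while never returning an override, as its updated doc comment states.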
+ +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use sc_client_api::backend; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_runtime::traits::Block as BlockT; +use sp_state_machine::{Ext, OverlayedChanges}; +use std::sync::Arc; + +/// Provider for fetching `:code` of a block. +/// +/// As a node can run with code overrides or substitutes, this will ensure that these are taken into account before returning the actual `code` for a block. +pub struct CodeProvider { + backend: Arc, + executor: Arc, + wasm_override: Arc>, + wasm_substitutes: WasmSubstitutes, +} + +impl Clone for CodeProvider { + fn clone(&self) -> Self { + Self { + backend: self.backend.clone(), + executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), + } + } +} + +impl CodeProvider +where + Block: BlockT, + Backend: backend::Backend, + Executor: RuntimeVersionOf, +{ + /// Create a new instance. + pub fn new( + client_config: &ClientConfig, + executor: Executor, + backend: Arc, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), &executor)) + .transpose()?; + + let executor = Arc::new(executor); + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes.clone(), + executor.clone(), + backend.clone(), + )?; + + Ok(Self { backend, executor, wasm_override: Arc::new(wasm_override), wasm_substitutes }) + } + + /// Returns the `:code` for the given `block`. + /// + /// This takes potential substitutes into account, but ignores overrides. + pub fn code_at_ignoring_overrides(&self, block: Block::Hash) -> sp_blockchain::Result> { + let state = self.backend.state_at(block)?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + + self.maybe_override_code_internal(runtime_code, &state, block, true) + .and_then(|r| { + r.0.fetch_runtime_code().map(Into::into).ok_or_else(|| { + sp_blockchain::Error::Backend("Could not find `:code` in backend.".into()) + }) + }) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides/substitutes. + pub fn maybe_override_code<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + self.maybe_override_code_internal(onchain_code, state, hash, false) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides (depending on `ignore_overrides`) and substitutes.
+ fn maybe_override_code_internal<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ignore_overrides: bool, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; + let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { + if ignore_overrides { + return None + } + + o.get( + &on_chain_version.spec_version, + onchain_code.heap_pages, + &on_chain_version.spec_name, + ) + }) { + tracing::debug!(target: "code-provider::overrides", block = ?hash, "using WASM override"); + d + } else if let Some(s) = + self.wasm_substitutes + .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) + { + tracing::debug!(target: "code-provider::substitutes", block = ?hash, "Using WASM substitute"); + s + } else { + tracing::debug!( + target: "code-provider", + block = ?hash, + "Neither WASM override nor substitute available, using onchain code", + ); + (onchain_code, on_chain_version) + }; + + Ok(code_and_version) + } + + /// Returns the on chain runtime version. + fn on_chain_runtime_version( + &self, + code: &RuntimeCode, + state: &Backend::State, + ) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + + let mut ext = Ext::new(&mut overlay, state, None); + + self.executor + .runtime_version(&mut ext, code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use backend::Backend; + use sc_client_api::{in_mem, HeaderBackend}; + use sc_executor::WasmExecutor; + use sp_core::{ + testing::TaskExecutor, + traits::{FetchRuntimeCode, WrappedRuntimeCode}, + }; + use std::collections::HashMap; + use substrate_test_runtime_client::{runtime, GenesisInit}; + + #[test] + fn no_override_no_substitutes_work() { + let executor = WasmExecutor::default(); + + let code_fetcher = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &code_fetcher, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(None), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(code_fetcher.fetch_runtime_code(), check.fetch_runtime_code()); + } + + #[test] + fn should_get_override_if_exists() { + let executor = WasmExecutor::default(); + + let overrides = crate::client::wasm_override::dummy_overrides(); + let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &onchain_code, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. + let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(Some(overrides)), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); + } + + #[test] + fn returns_runtime_version_from_substitute() { + const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; + + let executor = WasmExecutor::default(); + + let backend = Arc::new(in_mem::Backend::::new()); + + // Let's only override the `spec_name` for our testing purposes. 
+ let substitute = sp_version::embed::embed_runtime_version( + &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), + sp_version::RuntimeVersion { + spec_name: SUBSTITUTE_SPEC_NAME.into(), + ..substrate_test_runtime::VERSION + }, + ) + .unwrap(); + + let client_config = crate::client::ClientConfig { + wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), + ..Default::default() + }; + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. + let client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config, + ) + .expect("Creates a client"); + + let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); + + assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); + } +} diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index 0703cc2b47d144d4e67418cfb9966cd1cd209392..ec77a92f162f0c2927a8d5fb6537e4e2ebb861a8 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,14 +47,14 @@ mod block_rules; mod call_executor; mod client; +mod code_provider; mod notification_pinning; mod wasm_override; mod wasm_substitutes; -pub use self::{ - call_executor::LocalCallExecutor, - client::{Client, ClientConfig}, -}; +pub use call_executor::LocalCallExecutor; +pub use client::{Client, ClientConfig}; +pub(crate) use code_provider::CodeProvider; #[cfg(feature = "test-helpers")] pub use self::client::{new_in_mem, new_with_backend}; diff --git a/substrate/client/service/src/client/wasm_substitutes.rs b/substrate/client/service/src/client/wasm_substitutes.rs index 70db0ef20f5a83907536f67d8ef36500a8065b48..07ca6c96062831f37ecf35ef70847f47ec9b20d8 100644 --- a/substrate/client/service/src/client/wasm_substitutes.rs +++ b/substrate/client/service/src/client/wasm_substitutes.rs @@ -94,7 +94,7 @@ impl From for sp_blockchain::Error { pub struct WasmSubstitutes { /// spec_version -> WasmSubstitute substitutes: Arc>>, - executor: Executor, + executor: Arc, backend: Arc, } @@ -110,14 +110,14 @@ impl Clone for WasmSubstitutes WasmSubstitutes where - Executor: RuntimeVersionOf + Clone + 'static, + Executor: RuntimeVersionOf, Backend: backend::Backend, Block: BlockT, { /// Create a new instance. pub fn new( substitutes: HashMap, Vec>, - executor: Executor, + executor: Arc, backend: Arc, ) -> Result { let substitutes = substitutes diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 187e18aa3cace20c9fe9ffe2e9f3fdcbb6658bbd..e4788f1f3376cc24d2436fd0282bd9b2c1a28844 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -280,7 +280,7 @@ impl Default for RpcMethods { static mut BASE_PATH_TEMP: Option = None; /// The base path that is used for everything that needs to be written on disk to run a node. 
-#[derive(Debug)] +#[derive(Clone, Debug)] pub struct BasePath { path: PathBuf, } diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index a51bb4012d5d8ac0a4fb39e73c5a872a33e5fa7d..63be296d1b2161eaa8d7f71ea660a9d5a2037ecf 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -137,7 +137,7 @@ pub struct PartialComponents, /// The chain task manager. pub task_manager: TaskManager, - /// A keystore container instance.. + /// A keystore container instance. pub keystore_container: KeystoreContainer, /// A chain selection algorithm instance. pub select_chain: SelectChain, diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 3c754231395273838b02f8020d646aebd9ea54a2..ade7b3b4e6a0be6be4fd3f697e8cdcfc5ccc308d 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -15,33 +15,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-channel = "1.8.0" -array-bytes = "6.2.2" -fdlimit = "0.3.0" -futures = "0.3.30" +async-channel = { workspace = true } +array-bytes = { workspace = true, default-features = true } +fdlimit = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -parking_lot = "0.12.1" -tempfile = "3.1.0" -tokio = { version = "1.22.0", features = ["time"] } -sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-client-db = { path = "../../db", default-features = false } -sc-consensus = { path = "../../consensus/common" } -sc-executor = { path = "../../executor" } -sc-network = { path = "../../network" } -sc-network-sync = { path = "../../network/sync" } -sc-service = { path = "..", features = ["test-helpers"] } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-storage = { path = "../../../primitives/storage" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-trie = { path = "../../../primitives/trie" } -sp-io = { path = "../../../primitives/io" } -substrate-test-runtime = { path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +codec = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, 
default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml index e203eb5a3282fa1ed73c75426b6039f046bef7d7..be4f1ef973c98da80f206b98a38abf375fbefb36 100644 --- a/substrate/client/state-db/Cargo.toml +++ b/substrate/client/state-db/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -sp-core = { path = "../../primitives/core" } +parking_lot = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 8ca6d11dbe0dc786929925a90a40b5035f2607be..1cb682f054d72409b2a77b84f2b4cbc63eeaf728 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -parity-db = "0.4.12" -tokio = { version = "1.22.0", features = ["time"] } -sp-statement-store = { path = "../../primitives/statement-store" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -sc-client-api = { path = "../api" } -sc-keystore = { path = "../keystore" } +parking_lot = { workspace = true, default-features = true } +parity-db = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" -env_logger = "0.11" +tempfile = { workspace = true } +env_logger = { workspace = true } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 5248ebdf9a650ba2e1fd1a34f18dbbc0b16781fe..8c490284dccc6baae71a678702187c84cfb3b7a4 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -12,9 +12,9 @@ homepage = "https://substrate.io" workspace = true [dependencies] -clap = { version = "4.5.3", 
features = ["derive", "string"] } +clap = { features = ["derive", "string"], workspace = true } log = { workspace = true, default-features = true } -fs4 = "0.7.0" -sp-core = { path = "../../primitives/core" } -tokio = { version = "1.22.0", features = ["time"] } +fs4 = { workspace = true } +sp-core = { workspace = true, default-features = true } +tokio = { features = ["time"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index d5bdc920f7c9b48991354f5df917a3b3d72c86fa..1cbaadb70fff99d8242d6930a2857d1918a6f3ed 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -15,15 +15,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sc-consensus-babe = { path = "../consensus/babe" } -sc-consensus-epochs = { path = "../consensus/epochs" } -sc-consensus-grandpa = { path = "../consensus/grandpa" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-runtime = { path = "../../primitives/runtime" } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index 32b7755c64b5046fcd31cb6e0d9ff2c1061ed57a..f79345d67242901381a42470a851f509c3a910f5 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,20 +17,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -libc = "0.2" +futures = { workspace = true } +libc = { workspace = true } log = { workspace = true, default-features = true } -rand = "0.8.5" -rand_pcg = "0.3.1" -derive_more = "0.99" -regex = "1" +rand = { workspace = true, default-features = true } +rand_pcg = { workspace = true } +derive_more = { workspace = true, default-features = true } +regex = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-telemetry = { path = "../telemetry" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +sc-telemetry = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } 
[dev-dependencies] -sp-runtime = { path = "../../primitives/runtime" } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 0cce2acf6409c1a1a179d60d31b997469e8fd87e..a789ebc5f1afd569e6b1ea66b0b2ff7227335d4b 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -chrono = "0.4.31" -futures = "0.3.30" -libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } +chrono = { workspace = true } +futures = { workspace = true } +libp2p = { features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -pin-project = "1.0.12" -sc-utils = { path = "../utils" } -sc-network = { path = "../network" } -rand = "0.8.5" +parking_lot = { workspace = true, default-features = true } +pin-project = { workspace = true } +sc-utils = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -wasm-timer = "0.2.5" +wasm-timer = { workspace = true } diff --git a/substrate/client/telemetry/src/transport.rs b/substrate/client/telemetry/src/transport.rs index a82626caac2d340bfcee90dd7c1f06ac4a6edccd..ca6ceecbed63b1574c09971250ea5ea247349ef2 100644 --- a/substrate/client/telemetry/src/transport.rs +++ b/substrate/client/telemetry/src/transport.rs @@ -31,7 +31,7 @@ const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); pub(crate) fn initialize_transport() -> Result { let transport = { let tcp_transport = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::new()); - let inner = libp2p::dns::TokioDnsConfig::system(tcp_transport)?; + let inner = libp2p::dns::tokio::Transport::system(tcp_transport)?; libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { let connec = connec .with(|item| { diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index df674d24c6dd79b36ef7aa7aa36d389a7702678d..cacb1351416a9f4087d7f7db2a6b0ebbea21c447 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -16,32 +16,32 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ansi_term = "0.12.1" -is-terminal = "0.4.9" -chrono = "0.4.31" -codec = { package = "parity-scale-codec", version = "3.6.12" } -lazy_static = "1.4.0" -libc = "0.2.152" +ansi_term = { workspace = true } +is-terminal = { workspace = true } +chrono = { workspace = true } +codec = { workspace = true, default-features = true } +lazy_static = { workspace = true } +libc = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -regex = "1.6.0" -rustc-hash = "1.1.0" +parking_lot = { workspace = true, default-features = true } +regex = { workspace = true } +rustc-hash = { workspace = true } serde = { workspace = true, default-features = true } thiserror = { workspace = true } -tracing = "0.1.29" -tracing-log = "0.2.0" +tracing = { workspace = true, default-features = true } +tracing-log = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "parking_lot"] } -sc-client-api = { 
path = "../api" } -sc-tracing-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } -sp-tracing = { path = "../../primitives/tracing" } +sc-client-api = { workspace = true, default-features = true } +sc-tracing-proc-macro = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [dev-dependencies] -criterion = "0.5.1" +criterion = { workspace = true, default-features = true } tracing-subscriber = { workspace = true, features = ["chrono", "parking_lot"] } [[bench]] diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index fec34aa0bca935e22f0f7d7faea17cac5d8f10bb..9162bdc1ad8ab2d2de1938180b26cb6cb0426ab0 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { features = ["proc-macro"], workspace = true } syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 351650297ffc599a0081dff9cf0514dc6c6fc810..95b391faf169ea5fd8b431d504f0addcc2798798 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -16,36 +16,36 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.2" -linked-hash-map = "0.5.4" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-client-api = { path = "../api" } -sc-transaction-pool-api = { path = "api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-runtime = { path = "../../primitives/runtime" } -sp-tracing = { path = "../../primitives/tracing" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = 
true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1.3.0" -criterion = "0.5.1" -sc-block-builder = { path = "../block-builder" } -sp-consensus = { path = "../../primitives/consensus/common" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime-transaction-pool = { path = "../../test-utils/runtime/transaction-pool" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +criterion = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } [[bench]] name = "basics" diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index be80a7706b3efe653ceaf071f42a4e017bc428db..6fec613c1114d36c2dbc7ebd50314a2feb288b12 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -12,15 +12,15 @@ description = "Transaction pool client facing API." workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index a101f4b3f3ad009a35c1cc061e4c9515b98e1419..4053d77a13ebfcf587848c67d6af848f12af7084 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -13,18 +13,18 @@ readme = "README.md" workspace = true [dependencies] -async-channel = "1.8.0" -futures = "0.3.30" -futures-timer = "3.0.2" -lazy_static = "1.4.0" +async-channel = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +lazy_static = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -prometheus = { version = "0.13.0", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +parking_lot = { workspace = true, default-features = true } +prometheus = { workspace = true } +sp-arithmetic = { workspace = true } 
[features] default = ["metered"] metered = [] [dev-dependencies] -tokio-test = "0.4.2" +tokio-test = { workspace = true } diff --git a/substrate/deprecated/hashing/Cargo.toml b/substrate/deprecated/hashing/Cargo.toml index 8695ccc8fca2241b3947e7f2ac6a070174a158a3..9db6cb64050d64988e5926ef4c985732c5a10e1d 100644 --- a/substrate/deprecated/hashing/Cargo.toml +++ b/substrate/deprecated/hashing/Cargo.toml @@ -16,7 +16,7 @@ maintenance = { status = "deprecated" } targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/deprecated/hashing/proc-macro/Cargo.toml b/substrate/deprecated/hashing/proc-macro/Cargo.toml index aa78809241f0394e5f984c5a41a151a9ba247595..ec5188bc53bd9b293dccef00a2b4dcea98e9e584 100644 --- a/substrate/deprecated/hashing/proc-macro/Cargo.toml +++ b/substrate/deprecated/hashing/proc-macro/Cargo.toml @@ -16,4 +16,4 @@ maintenance = { status = "deprecated" } targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-crypto-hashing-proc-macro = { path = "../../../primitives/crypto/hashing/proc-macro" } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index a3b3d1900e6e6b536a876705a443d810fbe09f4d..594cb505cd60ff97b8247f4a9d284c3b3b82139f 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -18,51 +18,51 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # primitive deps, used for developing FRAME pallets. -sp-runtime = { default-features = false, path = "../primitives/runtime" } -sp-std = { default-features = false, path = "../primitives/std" } -sp-io = { default-features = false, path = "../primitives/io" } -sp-core = { default-features = false, path = "../primitives/core" } -sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } # frame deps, for developing FRAME pallets. -frame-support = { default-features = false, path = "support" } -frame-system = { default-features = false, path = "system" } +frame-support = { workspace = true } +frame-system = { workspace = true } # primitive types used for developing FRAME runtimes. 
-sp-version = { default-features = false, path = "../primitives/version", optional = true } -sp-api = { default-features = false, path = "../primitives/api", optional = true } -sp-block-builder = { default-features = false, path = "../primitives/block-builder", optional = true } -sp-transaction-pool = { default-features = false, path = "../primitives/transaction-pool", optional = true } -sp-offchain = { default-features = false, path = "../primitives/offchain", optional = true } -sp-session = { default-features = false, path = "../primitives/session", optional = true } -sp-consensus-aura = { default-features = false, path = "../primitives/consensus/aura", optional = true } -sp-consensus-grandpa = { default-features = false, path = "../primitives/consensus/grandpa", optional = true } -sp-inherents = { default-features = false, path = "../primitives/inherents", optional = true } -sp-storage = { default-features = false, path = "../primitives/storage", optional = true } +sp-version = { optional = true, workspace = true } +sp-api = { optional = true, workspace = true } +sp-block-builder = { optional = true, workspace = true } +sp-transaction-pool = { optional = true, workspace = true } +sp-offchain = { optional = true, workspace = true } +sp-session = { optional = true, workspace = true } +sp-consensus-aura = { optional = true, workspace = true } +sp-consensus-grandpa = { optional = true, workspace = true } +sp-inherents = { optional = true, workspace = true } +sp-storage = { optional = true, workspace = true } -frame-executive = { default-features = false, path = "../frame/executive", optional = true } -frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } +frame-executive = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { optional = true, workspace = true } # Used for runtime benchmarking -frame-benchmarking = { default-features = false, path = "../frame/benchmarking", optional = true } -frame-system-benchmarking = { default-features = false, path = "../frame/system/benchmarking", optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } # Used for try-runtime -frame-try-runtime = { default-features = false, path = "../frame/try-runtime", optional = true } +frame-try-runtime = { optional = true, workspace = true } -docify = "0.2.8" +docify = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-examples = { path = "./examples" } +pallet-examples = { workspace = true } [features] default = ["runtime", "std"] diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index 10e2feba62376d9cd8346f44531dbd3e6bc47870..7ad9961d9b89cfd28023e70934a2697cadaa6984 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -16,30 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { 
path = "../../primitives/core", default-features = false } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { optional = true, workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -pallet-identity = { path = "../identity", default-features = false } -pallet-collective = { path = "../collective", default-features = false, optional = true } +pallet-identity = { workspace = true } +pallet-collective = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } -pallet-balances = { path = "../balances" } -pallet-collective = { path = "../collective" } +array-bytes = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index a9cfd6d0fde0eb149e4f33db86823e7e711426cd..1a0a899bcccb4b8a7f194f0770de03bf7fab7451 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -52,24 +52,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index bfcda2299d5a116eca62a5870a84823876bc89f5..27d86bf6710b0c343e72f8c0b01494020bb4b0d3 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -16,23 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -log = { version = "0.4.20", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../primitives/api", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +codec = { workspace = true } +log = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-assets = { path = "../assets" } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] default = ["std"] diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml index c5efbf9f6f442b0430cecab10badb4ca13e49d0a..0dc6cd5b1672ae02fcaf8b9a08fd51618bf981c5 100644 --- a/substrate/frame/asset-conversion/ops/Cargo.toml +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -15,23 +15,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -log = { version = "0.4.20", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-benchmarking = { path 
= "../../benchmarking", default-features = false, optional = true } -pallet-asset-conversion = { path = "..", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } +codec = { workspace = true } +log = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-asset-conversion = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../balances" } -pallet-assets = { path = "../../assets" } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] default = ["std"] diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs index 9454b3a9ad448af4b47d9fd974e6ec8ad5ed4e59..91c18b2e79494eb39812c0aca9f33dfdf6710d60 100644 --- a/substrate/frame/asset-conversion/ops/src/mock.rs +++ b/substrate/frame/asset-conversion/ops/src/mock.rs @@ -52,7 +52,7 @@ construct_runtime!( } ); -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -60,7 +60,6 @@ impl frame_system::Config for Test { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 477866e0051bc43207d1fb728380e5f84258964f..d8832d70488af7d38507c83a964bc1c2c07815d0 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -61,20 +61,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<100>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 4662469e46ce423edaab234041a852a42b0711bb..96c87020684fbf195b8d2cac1976c4884ea6d7cc 100644 --- 
a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -15,21 +15,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-io = { path = "../../primitives/io" } -sp-core = { path = "../../primitives/core", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index d01996dab193d6d6e21a14efed7f14ff2ef8c931..c829d78afa886ad50546196322e37921c7240ed7 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -18,7 +18,7 @@ //! The crate's mock. 
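The mock runtimes touched in this diff (alliance and asset-conversion above, asset-rate just below) replace hand-written `pallet_balances::Config` impls with `#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]`, which fills every associated type that is not spelled out from the prelude's test defaults. A condensed sketch of the pattern; it presumes the usual `Test` runtime and `System` pallet from a `construct_runtime!` mock such as the ones in this diff, so it is a pattern sketch rather than a standalone compilable file.

```rust
use frame_support::derive_impl;

// Associated types that are not listed here (Balance, DustRemoval, MaxLocks,
// FreezeIdentifier, ...) are taken from the prelude's defaults.
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
	// Mocks typically keep only the handful of items they actually care about.
	type AccountStore = System;
}
```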
use crate as pallet_asset_rate; -use frame_support::{derive_impl, traits::ConstU64}; +use frame_support::derive_impl; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -38,20 +38,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_asset_rate::Config for Test { diff --git a/substrate/frame/assets-freezer/Cargo.toml b/substrate/frame/assets-freezer/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..97eadd8465a70d5d356a91c92f5169597ded9b40 --- /dev/null +++ b/substrate/frame/assets-freezer/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-assets-freezer" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "Provides freezing features to `pallet-assets`" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +sp-runtime = { workspace = true } + +[dev-dependencies] +sp-io = { workspace = true } +sp-core = { workspace = true } +pallet-balances = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/assets-freezer/src/impls.rs b/substrate/frame/assets-freezer/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd383f1c3cd1e68f7cc0bcf0bb2841f0acf52119 --- /dev/null +++ b/substrate/frame/assets-freezer/src/impls.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +use frame_support::traits::{ + fungibles::{Inspect, InspectFreeze, MutateFreeze}, + tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence}, +}; +use pallet_assets::FrozenBalance; +use sp_runtime::traits::Zero; + +// Implements [`FrozenBalance`] from [`pallet-assets`], so it can understand how much of an +// account balance is frozen, and is able to signal to this pallet when to clear the state of an +// account. +impl, I: 'static> FrozenBalance + for Pallet +{ + fn frozen_balance(asset: T::AssetId, who: &T::AccountId) -> Option { + FrozenBalances::::get(asset, who) + } + + fn died(asset: T::AssetId, who: &T::AccountId) { + FrozenBalances::::remove(asset.clone(), who); + Freezes::::remove(asset, who); + } +} + +// Implement [`fungibles::Inspect`](frame_support::traits::fungibles::Inspect) as it is bound by +// [`fungibles::InspectFreeze`](frame_support::traits::fungibles::InspectFreeze) and +// [`fungibles::MutateFreeze`](frame_support::traits::fungibles::MutateFreeze). To do so, we'll +// re-export all of `pallet-assets` implementation of the same trait. +impl, I: 'static> Inspect for Pallet { + type AssetId = T::AssetId; + type Balance = T::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + pallet_assets::Pallet::::total_issuance(asset) + } + + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + pallet_assets::Pallet::::minimum_balance(asset) + } + + fn total_balance(asset: Self::AssetId, who: &T::AccountId) -> Self::Balance { + pallet_assets::Pallet::::total_balance(asset, who) + } + + fn balance(asset: Self::AssetId, who: &T::AccountId) -> Self::Balance { + pallet_assets::Pallet::::balance(asset, who) + } + + fn reducible_balance( + asset: Self::AssetId, + who: &T::AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + pallet_assets::Pallet::::reducible_balance(asset, who, preservation, force) + } + + fn can_deposit( + asset: Self::AssetId, + who: &T::AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + pallet_assets::Pallet::::can_deposit(asset, who, amount, provenance) + } + + fn can_withdraw( + asset: Self::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + pallet_assets::Pallet::::can_withdraw(asset, who, amount) + } + + fn asset_exists(asset: Self::AssetId) -> bool { + pallet_assets::Pallet::::asset_exists(asset) + } +} + +impl, I: 'static> InspectFreeze for Pallet { + type Id = T::RuntimeFreezeReason; + + fn balance_frozen(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> Self::Balance { + let freezes = Freezes::::get(asset, who); + freezes.into_iter().find(|l| &l.id == id).map_or(Zero::zero(), |l| l.amount) + } + + fn can_freeze(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> bool { + let freezes = Freezes::::get(asset, who); + !freezes.is_full() || freezes.into_iter().any(|i| i.id == *id) + } +} + +impl, I: 'static> MutateFreeze for Pallet { + fn set_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &T::AccountId, + amount: Self::Balance, + ) -> sp_runtime::DispatchResult { + if amount.is_zero() { + return Self::thaw(asset, id, who); + } + let mut freezes = Freezes::::get(asset.clone(), who); + if let Some(i) = freezes.iter_mut().find(|i| &i.id == id) { + i.amount = amount; + } else { + freezes + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| 
Error::<T, I>::TooManyFreezes)?; + } + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } + + fn extend_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &T::AccountId, + amount: Self::Balance, + ) -> sp_runtime::DispatchResult { + if amount.is_zero() { + return Ok(()); + } + let mut freezes = Freezes::<T, I>::get(asset.clone(), who); + if let Some(i) = freezes.iter_mut().find(|x| &x.id == id) { + i.amount = i.amount.max(amount); + } else { + freezes + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| Error::<T, I>::TooManyFreezes)?; + } + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } + + fn thaw(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> sp_runtime::DispatchResult { + let mut freezes = Freezes::<T, I>::get(asset.clone(), who); + freezes.retain(|f| &f.id != id); + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } +} diff --git a/substrate/frame/assets-freezer/src/lib.rs b/substrate/frame/assets-freezer/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b42d41ac1d9255c0046de37ddf5cf1222173be7f --- /dev/null +++ b/substrate/frame/assets-freezer/src/lib.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Assets Freezer Pallet +//! +//! A pallet capable of freezing fungibles from `pallet-assets`. This is an extension of +//! `pallet-assets`, wrapping [`fungibles::Inspect`](`frame_support::traits::fungibles::Inspect`). +//! It implements both +//! [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and +//! [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate). The complexity +//! of the operations is `O(n)`, where `n` is the variant count of `RuntimeFreezeReason`. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! ## Overview +//! +//! This pallet provides the following functionality: +//! +//! - Pallet hooks allowing [`pallet-assets`] to know the frozen balance for an account on a given +//! asset (see [`pallet_assets::FrozenBalance`]). +//! - An implementation of +//! [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and +//! [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate), allowing +//! other pallets to manage freezes for the `pallet-assets` assets.
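Concretely, other runtime code or tests drive this pallet through the `fungibles` freeze traits it implements. A hedged sketch in the style of the unit tests added later in this diff; `AssetsFreezer`, `DummyFreezeReason`, `ASSET_ID`, `WHO` and `new_test_ext` all come from the test mock, and the amounts are arbitrary.

```rust
use frame_support::{
	assert_ok,
	traits::fungibles::{InspectFreeze, MutateFreeze},
};

#[test]
fn freeze_and_thaw_sketch() {
	new_test_ext(|| {
		// Freeze 10 units of the asset for one reason; the pallet records it per
		// (asset, reason, account) and reports the maximum to `pallet-assets`.
		assert_ok!(AssetsFreezer::set_freeze(ASSET_ID, &DummyFreezeReason::Governance, &WHO, 10));
		assert_eq!(
			AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO),
			10
		);
		// Thawing removes only that reason's freeze.
		assert_ok!(AssetsFreezer::thaw(ASSET_ID, &DummyFreezeReason::Governance, &WHO));
		assert_eq!(
			AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO),
			0
		);
	});
}
```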
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{ + pallet_prelude::*, + traits::{tokens::IdAmount, VariantCount, VariantCountOf}, + BoundedVec, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use sp_runtime::{ + traits::{Saturating, Zero}, + BoundedSlice, +}; + +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +mod impls; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config + pallet_assets::Config { + /// The overarching freeze reason. + #[pallet::no_default_bounds] + type RuntimeFreezeReason: Parameter + Member + MaxEncodedLen + Copy + VariantCount; + + /// The overarching event type. + #[pallet::no_default_bounds] + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::error] + pub enum Error { + /// Number of freezes on an account would exceed `MaxFreezes`. + TooManyFreezes, + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + // `who`s frozen balance was increased by `amount`. + Frozen { who: T::AccountId, asset_id: T::AssetId, amount: T::Balance }, + // `who`s frozen balance was decreased by `amount`. + Thawed { who: T::AccountId, asset_id: T::AssetId, amount: T::Balance }, + } + + /// A map that stores freezes applied on an account for a given AssetId. + #[pallet::storage] + pub(super) type Freezes, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + BoundedVec< + IdAmount, + VariantCountOf, + >, + ValueQuery, + >; + + /// A map that stores the current total frozen balance for every account on a given AssetId. + #[pallet::storage] + pub(super) type FrozenBalances, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + T::Balance, + >; + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } +} + +impl, I: 'static> Pallet { + fn update_freezes( + asset: T::AssetId, + who: &T::AccountId, + freezes: BoundedSlice< + IdAmount, + VariantCountOf, + >, + ) -> DispatchResult { + let prev_frozen = FrozenBalances::::get(asset.clone(), who).unwrap_or_default(); + let after_frozen = freezes.into_iter().map(|f| f.amount).max().unwrap_or_else(Zero::zero); + FrozenBalances::::set(asset.clone(), who, Some(after_frozen)); + if freezes.is_empty() { + Freezes::::remove(asset.clone(), who); + FrozenBalances::::remove(asset.clone(), who); + } else { + Freezes::::insert(asset.clone(), who, freezes); + } + if prev_frozen > after_frozen { + let amount = prev_frozen.saturating_sub(after_frozen); + Self::deposit_event(Event::Thawed { asset_id: asset, who: who.clone(), amount }); + } else if after_frozen > prev_frozen { + let amount = after_frozen.saturating_sub(prev_frozen); + Self::deposit_event(Event::Frozen { asset_id: asset, who: who.clone(), amount }); + } + Ok(()) + } + + #[cfg(any(test, feature = "try-runtime"))] + fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + for (asset, who, _) in FrozenBalances::::iter() { + let max_frozen_amount = + Freezes::::get(asset.clone(), who.clone()).iter().map(|l| l.amount).max(); + + frame_support::ensure!( + FrozenBalances::::get(asset, who) == max_frozen_amount, + "The `FrozenAmount` is not equal to the maximum amount in 
`Freezes` for (`asset`, `who`)" + ); + } + + Ok(()) + } +} diff --git a/substrate/frame/assets-freezer/src/mock.rs b/substrate/frame/assets-freezer/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e04dfe8e2b9c4adf3553919dfe14b5fe55870ae --- /dev/null +++ b/substrate/frame/assets-freezer/src/mock.rs @@ -0,0 +1,155 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests mock for `pallet-assets-freezer`. + +use crate as pallet_assets_freezer; +pub use crate::*; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; +use frame_support::{ + derive_impl, + traits::{AsEnsureOriginWithArg, ConstU64}, +}; +use scale_info::TypeInfo; +use sp_core::{ConstU32, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +pub type AccountId = u64; +pub type Balance = u64; +pub type AssetId = u32; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Assets: pallet_assets, + AssetsFreezer: pallet_assets_freezer, + Balances: pallet_balances, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); +} + +impl pallet_assets::Config for Test { + type AssetId = AssetId; + type AssetIdParameter = Compact; + type AssetDeposit = ConstU64<1>; + type Balance = Balance; + type AssetAccountDeposit = ConstU64<1>; + type MetadataDepositBase = (); + type MetadataDepositPerByte = (); + type ApprovalDeposit = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type StringLimit = ConstU32<32>; + type Extra = (); + type RemoveItemsLimit = ConstU32<10>; + type CallbackHandle = (); + type Currency = Balances; + type 
Freezer = AssetsFreezer; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +#[derive( + Decode, Encode, MaxEncodedLen, PartialEq, Eq, Ord, PartialOrd, TypeInfo, Debug, Clone, Copy, +)] +pub enum DummyFreezeReason { + Governance, + Staking, + Other, +} + +impl VariantCount for DummyFreezeReason { + // Intentionally set below the actual count of variants, to allow testing for `can_freeze` + const VARIANT_COUNT: u32 = 2; +} + +impl Config for Test { + type RuntimeFreezeReason = DummyFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + +pub fn new_test_ext(execute: impl FnOnce()) -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + assets: pallet_assets::GenesisConfig { + assets: vec![(1, 0, true, 1)], + metadata: vec![], + accounts: vec![(1, 1, 100)], + next_asset_id: None, + }, + system: Default::default(), + balances: Default::default(), + } + .build_storage() + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + System::set_block_number(1); + execute(); + frame_support::assert_ok!(AssetsFreezer::do_try_state()); + }); + + ext +} diff --git a/substrate/frame/assets-freezer/src/tests.rs b/substrate/frame/assets-freezer/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f2dea79c705a9ecacba4ec427f2f8553920f9a1 --- /dev/null +++ b/substrate/frame/assets-freezer/src/tests.rs @@ -0,0 +1,304 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-assets-freezer. 
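A detail worth calling out from the mock above: `DummyFreezeReason::VARIANT_COUNT` is deliberately `2` even though the enum has three variants, because the pallet's `Freezes` storage is a `BoundedVec` capped by `VariantCountOf<RuntimeFreezeReason>`; once two reasons are in place a third cannot be pushed, which is what `can_freeze_works` below exercises. A tiny standalone illustration of that capacity behaviour (plain `BoundedVec`, not the pallet's actual storage type):

```rust
use frame_support::{traits::ConstU32, BoundedVec};

fn main() {
	// Capacity of 2, mirroring the mock's `VARIANT_COUNT = 2`.
	let mut freezes: BoundedVec<u8, ConstU32<2>> = BoundedVec::default();
	assert!(freezes.try_push(1).is_ok());
	assert!(freezes.try_push(2).is_ok());
	// A third entry no longer fits, so `can_freeze` would report `false` for a new reason.
	assert!(freezes.try_push(3).is_err());
}
```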
+ +use crate::mock::*; + +use codec::Compact; +use frame_support::{ + assert_ok, assert_storage_noop, + traits::{ + fungibles::{Inspect, InspectFreeze, MutateFreeze}, + tokens::{Fortitude, Preservation}, + }, +}; +use pallet_assets::FrozenBalance; + +const WHO: AccountId = 1; +const ASSET_ID: AssetId = 1; + +fn test_set_freeze(id: DummyFreezeReason, amount: Balance) { + let mut freezes = Freezes::<Test>::get(ASSET_ID, WHO); + + if let Some(i) = freezes.iter_mut().find(|l| l.id == id) { + i.amount = amount; + } else { + freezes + .try_push(IdAmount { id, amount }) + .expect("freeze is added without exceeding bounds; qed"); + } + + assert_ok!(AssetsFreezer::update_freezes(ASSET_ID, &WHO, freezes.as_bounded_slice())); +} + +fn test_thaw(id: DummyFreezeReason) { + let mut freezes = Freezes::<Test>::get(ASSET_ID, WHO); + freezes.retain(|l| l.id != id); + + assert_ok!(AssetsFreezer::update_freezes(ASSET_ID, &WHO, freezes.as_bounded_slice())); +} + +mod impl_frozen_balance { + use super::*; + + #[test] + fn frozen_balance_works() { + new_test_ext(|| { + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), None); + test_set_freeze(DummyFreezeReason::Governance, 1); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(1u64)); + test_set_freeze(DummyFreezeReason::Staking, 3); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + test_set_freeze(DummyFreezeReason::Governance, 2); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + // also test thawing works to reduce a balance, and finally thawing everything resets to + // None + test_thaw(DummyFreezeReason::Governance); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + test_thaw(DummyFreezeReason::Staking); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), None); + }); + } + + #[test] + fn died_works() { + new_test_ext(|| { + test_set_freeze(DummyFreezeReason::Governance, 1); + AssetsFreezer::died(ASSET_ID, &WHO); + assert!(FrozenBalances::<Test>::get(ASSET_ID, WHO).is_none()); + assert!(Freezes::<Test>::get(ASSET_ID, WHO).is_empty()); + }); + } +} + +mod impl_inspect_freeze { + use super::*; + + #[test] + fn balance_frozen_works() { + new_test_ext(|| { + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 0u64 + ); + test_set_freeze(DummyFreezeReason::Governance, 1); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 1u64 + ); + test_set_freeze(DummyFreezeReason::Staking, 3); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 3u64 + ); + test_set_freeze(DummyFreezeReason::Staking, 2); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 2u64 + ); + // also test thawing works to reduce a balance, and finally thawing everything resets to + // 0 + test_thaw(DummyFreezeReason::Governance); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 0u64 + ); + test_thaw(DummyFreezeReason::Staking); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 0u64 + ); + }); + } + + /// This tests that it's not possible to freeze once the freezes [`BoundedVec`] is full. That is, + /// the length of the vec is equal to [`Config::MaxFreezes`]. + /// This test assumes a mock configuration where this parameter is set to `2`.
+ #[test] + fn can_freeze_works() { + new_test_ext(|| { + test_set_freeze(DummyFreezeReason::Governance, 1); + assert!(AssetsFreezer::can_freeze(ASSET_ID, &DummyFreezeReason::Staking, &WHO)); + test_set_freeze(DummyFreezeReason::Staking, 1); + assert!(!AssetsFreezer::can_freeze(ASSET_ID, &DummyFreezeReason::Other, &WHO)); + }); + } +} + +mod impl_mutate_freeze { + use super::*; + + #[test] + fn set_freeze_works() { + new_test_ext(|| { + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 99 + ); + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 8 + )); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 91 + ); + System::assert_last_event( + Event::::Thawed { asset_id: ASSET_ID, who: WHO, amount: 2 }.into(), + ); + }); + } + + #[test] + fn extend_freeze_works() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + assert_storage_noop!(assert_ok!(AssetsFreezer::extend_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 8 + ))); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + assert_ok!(AssetsFreezer::extend_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 11 + )); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 1 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 88 + ); + }); + } + + #[test] + fn thaw_works() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + System::assert_has_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + assert_ok!(AssetsFreezer::thaw(ASSET_ID, &DummyFreezeReason::Governance, &WHO)); + System::assert_has_event( + Event::::Thawed { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 99 + ); + }); + } +} + +mod with_pallet_assets { + use frame_support::assert_noop; + + use super::*; + + #[test] + fn frozen_balance_affects_balance_transferring() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 20 + )); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(WHO), Compact(ASSET_ID), 2, 80), + pallet_assets::Error::::BalanceLow, + ); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(WHO), Compact(ASSET_ID), 2, 79)); + }); + } +} diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 9647ae4db6baa6ada63350153b8a10993e940401..d0b5dc777898898fa1e71b259ea018be11de8552 100644 --- a/substrate/frame/assets/Cargo.toml +++ 
b/substrate/frame/assets/Cargo.toml @@ -16,23 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } +impl-trait-for-tuples = "0.2.2" log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +sp-std = { workspace = true } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-runtime = { workspace = true } # Needed for type-safe access to storage DB. -frame-support = { path = "../support", default-features = false } +frame-support = { workspace = true } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. -frame-system = { path = "../system", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +sp-core = { workspace = true } [dev-dependencies] -sp-std = { path = "../../primitives/std" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } +sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index 9309d010117576177f6bb055e732c8caf2f13bcb..1e4a9522759c35b868447c8a0960643d468e4c71 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -132,6 +132,9 @@ impl, I: 'static> Pallet { Some(details) => details, None => return DepositConsequence::UnknownAsset, }; + if details.status == AssetStatus::Destroying { + return DepositConsequence::UnknownAsset + } if increase_supply && details.supply.checked_add(&amount).is_none() { return DepositConsequence::Overflow } @@ -175,6 +178,9 @@ impl, I: 'static> Pallet { if details.status == AssetStatus::Frozen { return Frozen } + if details.status == AssetStatus::Destroying { + return UnknownAsset + } if amount.is_zero() { return Success } @@ -709,6 +715,9 @@ impl, I: 'static> Pallet { ) -> DispatchResult { ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + if let Some(next_id) = NextAssetId::::get() { + ensure!(id == next_id, Error::::BadAssetId); + } Asset::::insert( &id, diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index d5214922555890ac96a931458b01522da1b52779..6dbce717a8e8db6cd09af424d26b0f5557b8c650 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -174,6 +174,7 @@ use sp_runtime::{ }; use sp_std::prelude::*; +use core::marker::PhantomData; use frame_support::{ dispatch::DispatchResult, ensure, @@ -182,7 +183,7 @@ use frame_support::{ traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, - Currency, EnsureOriginWithArg, ReservableCurrency, StoredMap, + Currency, EnsureOriginWithArg, Incrementable, ReservableCurrency, StoredMap, }, }; use frame_system::Config as 
SystemConfig; @@ -206,8 +207,37 @@ pub trait AssetsCallback { } } -/// Empty implementation in case no callbacks are required. -impl AssetsCallback for () {} +#[impl_trait_for_tuples::impl_for_tuples(10)] +impl AssetsCallback for Tuple { + fn created(id: &AssetId, owner: &AccountId) -> Result<(), ()> { + for_tuples!( #( Tuple::created(id, owner)?; )* ); + Ok(()) + } + + fn destroyed(id: &AssetId) -> Result<(), ()> { + for_tuples!( #( Tuple::destroyed(id)?; )* ); + Ok(()) + } +} + +/// Auto-increment the [`NextAssetId`] when an asset is created. +/// +/// This has not effect if the [`NextAssetId`] value is not present. +pub struct AutoIncAssetId(PhantomData<(T, I)>); +impl, I> AssetsCallback for AutoIncAssetId +where + T::AssetId: Incrementable, +{ + fn created(_: &T::AssetId, _: &T::AccountId) -> Result<(), ()> { + let Some(next_id) = NextAssetId::::get() else { + // Auto increment for the asset id is not enabled. + return Ok(()); + }; + let next_id = next_id.increment().ok_or(())?; + NextAssetId::::put(next_id); + Ok(()) + } +} #[frame_support::pallet] pub mod pallet { @@ -361,6 +391,11 @@ pub mod pallet { type Extra: Member + Parameter + Default + MaxEncodedLen; /// Callback methods for asset state change (e.g. asset created or destroyed) + /// + /// Types implementing the [`AssetsCallback`] can be chained when listed together as a + /// tuple. + /// The [`AutoIncAssetId`] callback, in conjunction with the [`NextAssetId`], can be + /// used to set up auto-incrementing asset IDs for this collection. type CallbackHandle: AssetsCallback; /// Weight information for extrinsics in this pallet. @@ -415,6 +450,18 @@ pub mod pallet { ValueQuery, >; + /// The asset ID enforced for the next asset creation, if any present. Otherwise, this storage + /// item has no effect. + /// + /// This can be useful for setting up constraints for IDs of the new assets. For example, by + /// providing an initial [`NextAssetId`] and using the [`crate::AutoIncAssetId`] callback, an + /// auto-increment model can be applied to all new asset IDs. + /// + /// The initial next asset ID can be set using the [`GenesisConfig`] or the + /// [SetNextAssetId](`migration::next_asset_id::SetNextAssetId`) migration. + #[pallet::storage] + pub type NextAssetId, I: 'static = ()> = StorageValue<_, T::AssetId, OptionQuery>; + #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { @@ -424,6 +471,13 @@ pub mod pallet { pub metadata: Vec<(T::AssetId, Vec, Vec, u8)>, /// Genesis accounts: id, account_id, balance pub accounts: Vec<(T::AssetId, T::AccountId, T::Balance)>, + /// Genesis [`NextAssetId`]. + /// + /// Refer to the [`NextAssetId`] item for more information. + /// + /// This does not enforce the asset ID for the [assets](`GenesisConfig::assets`) within the + /// genesis config. It sets the [`NextAssetId`] after they have been created. + pub next_asset_id: Option, } #[pallet::genesis_build] @@ -485,6 +539,10 @@ pub mod pallet { ); assert!(result.is_ok()); } + + if let Some(next_asset_id) = &self.next_asset_id { + NextAssetId::::put(next_asset_id); + } } } @@ -622,6 +680,8 @@ pub mod pallet { NotFrozen, /// Callback action resulted in error CallbackFailed, + /// The asset ID must be equal to the [`NextAssetId`]. + BadAssetId, } #[pallet::call(weight(>::WeightInfo))] @@ -636,7 +696,7 @@ pub mod pallet { /// /// Parameters: /// - `id`: The identifier of the new asset. This must not be currently in use to identify - /// an existing asset. + /// an existing asset. 
If [`NextAssetId`] is set, then this must be equal to it. /// - `admin`: The admin of this class of assets. The admin is the initial address of each /// member of the asset class's admin team. /// - `min_balance`: The minimum balance of this new asset that any single account must @@ -659,6 +719,10 @@ pub mod pallet { ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + if let Some(next_id) = NextAssetId::::get() { + ensure!(id == next_id, Error::::BadAssetId); + } + let deposit = T::AssetDeposit::get(); T::Currency::reserve(&owner, deposit)?; @@ -698,7 +762,7 @@ pub mod pallet { /// Unlike `create`, no funds are reserved. /// /// - `id`: The identifier of the new asset. This must not be currently in use to identify - /// an existing asset. + /// an existing asset. If [`NextAssetId`] is set, then this must be equal to it. /// - `owner`: The owner of this class of assets. The owner has full superuser permissions /// over this asset, but may later change and configure the permissions using /// `transfer_ownership` and `set_team`. diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index dd7c12293e80f410301bb47cca013ae4f013e72f..9096f25fb791fe61b6832ac951baa27fb81c7598 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -22,6 +22,30 @@ use log; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; +pub mod next_asset_id { + use super::*; + use sp_core::Get; + + /// Set [`NextAssetId`] to the value of `ID` if [`NextAssetId`] does not exist yet. + pub struct SetNextAssetId, I: 'static = ()>( + core::marker::PhantomData<(ID, T, I)>, + ); + impl, I: 'static> OnRuntimeUpgrade for SetNextAssetId + where + T::AssetId: Incrementable, + ID: Get, + { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if !NextAssetId::::exists() { + NextAssetId::::put(ID::get()); + T::DbWeight::get().reads_writes(1, 1) + } else { + T::DbWeight::get().reads(1) + } + } + } +} + pub mod v1 { use frame_support::{pallet_prelude::*, weights::Weight}; diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index f6173a451fffaee41f66dc6f697a40cd68e3648b..2c160840e147829f462ec3f407f00afdd77e9dda 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -23,7 +23,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + traits::{AsEnsureOriginWithArg, ConstU32}, }; use sp_io::storage; use sp_runtime::BuildStorage; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<3>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } pub struct AssetsCallbackHandle; @@ -114,7 +103,7 @@ impl Config for Test { type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; type Freezer = TestFreezer; - type CallbackHandle = AssetsCallbackHandle; + type CallbackHandle = (AssetsCallbackHandle, 
AutoIncAssetId); } use std::collections::HashMap; @@ -178,6 +167,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities { // id, account_id, balance (999, 1, 100), ], + next_asset_id: None, }; config.assimilate_storage(&mut storage).unwrap(); diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index c7021bcad531046c8881a8a9ef8932f4bd54c011..c751fbdcaf1bb116bf2ecf47dc88b3e33c74bf12 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -22,7 +22,11 @@ use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, dispatch::GetDispatchInfo, - traits::{fungibles::InspectEnumerable, tokens::Preservation::Protect, Currency}, + traits::{ + fungibles::InspectEnumerable, + tokens::{Preservation::Protect, Provenance}, + Currency, + }, }; use pallet_balances::Error as BalancesError; use sp_io::storage; @@ -1777,3 +1781,100 @@ fn asset_destroy_refund_existence_deposit() { assert_eq!(Balances::reserved_balance(&admin), 0); }); } + +#[test] +fn increasing_or_decreasing_destroying_asset_should_not_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::fungibles::Inspect; + + let admin = 1; + let admin_origin = RuntimeOrigin::signed(admin); + + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, admin, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + assert_eq!(Assets::can_deposit(0, &1, 10, Provenance::Extant), DepositConsequence::Success); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::Success); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::Success); + assert_eq!(Assets::can_decrease(0, &1, 10, false), WithdrawConsequence::<_>::Success); + + assert_ok!(Assets::start_destroy(admin_origin, 0)); + + assert_eq!( + Assets::can_deposit(0, &1, 10, Provenance::Extant), + DepositConsequence::UnknownAsset + ); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::UnknownAsset); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::UnknownAsset); + assert_eq!(Assets::can_decrease(0, &1, 10, false), WithdrawConsequence::<_>::UnknownAsset); + }); +} + +#[test] +fn asset_id_cannot_be_reused() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + // Asset id can be reused till auto increment is not enabled. + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 0)); + + assert!(!Asset::::contains_key(0)); + + // Asset id `0` is reused. + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 0)); + + assert!(!Asset::::contains_key(0)); + + // Enable auto increment. Next asset id must be 5. + pallet::NextAssetId::::put(5); + + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1), Error::::BadAssetId); + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 1, 1, 1), Error::::BadAssetId); + assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1), + Error::::BadAssetId + ); + assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 1, 1, true, 1), + Error::::BadAssetId + ); + + // Asset with id `5` is created. 
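The assets changes above introduce `NextAssetId`, the `BadAssetId` error, the `AutoIncAssetId` callback and the `SetNextAssetId` migration; the surrounding test asserts that once the counter is set, asset IDs can no longer be chosen freely or reused. A stripped-down model of that mechanism, with pallet storage replaced by plain fields (all names below are illustrative, not the pallet's API):

```rust
use std::collections::BTreeSet;

/// - `next_id == None`    => any free id may be used (legacy behaviour),
/// - `next_id == Some(n)` => only `n` is accepted, and a successful creation
///   bumps the counter, so ids can never be reused.
struct AssetRegistry {
    existing: BTreeSet<u32>,
    next_id: Option<u32>,
}

#[derive(Debug, PartialEq)]
enum CreateError {
    InUse,
    BadAssetId,
}

impl AssetRegistry {
    fn create(&mut self, id: u32) -> Result<(), CreateError> {
        if self.existing.contains(&id) {
            return Err(CreateError::InUse);
        }
        // Mirrors the new `ensure!(id == next_id, Error::BadAssetId)` check.
        if let Some(next) = self.next_id {
            if id != next {
                return Err(CreateError::BadAssetId);
            }
        }
        self.existing.insert(id);
        // Mirrors the `AutoIncAssetId` callback: bump the counter on success.
        if let Some(next) = self.next_id {
            self.next_id = next.checked_add(1);
        }
        Ok(())
    }

    fn destroy(&mut self, id: u32) {
        self.existing.remove(&id);
    }
}

fn main() {
    let mut reg = AssetRegistry { existing: BTreeSet::new(), next_id: Some(5) };
    assert_eq!(reg.create(0), Err(CreateError::BadAssetId));
    assert_eq!(reg.create(5), Ok(()));
    reg.destroy(5);
    // Even though id 5 is free again, the counter has moved on to 6,
    // so the id cannot be reused -- the behaviour the test above asserts.
    assert_eq!(reg.create(5), Err(CreateError::BadAssetId));
    assert_eq!(reg.create(6), Ok(()));
}
```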
+ assert_ok!(Assets::create(RuntimeOrigin::signed(1), 5, 1, 1)); + assert!(Asset::::contains_key(5)); + + // Destroy asset with id `6`. + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 5)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 5)); + + assert!(!Asset::::contains_key(0)); + + // Asset id `5` cannot be reused. + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 5, 1, 1), Error::::BadAssetId); + + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 6, 1, 1)); + assert!(Asset::::contains_key(6)); + + // Destroy asset with id `6`. + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 6)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 6)); + + assert!(!Asset::::contains_key(6)); + + // Asset id `6` cannot be reused with force. + assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 6, 1, false, 1), + Error::::BadAssetId + ); + + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 7, 1, false, 1)); + assert!(Asset::::contains_key(7)); + }); +} diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 8083c12d4b39fb0b860a738f700168bcad0bc2f3..7e2f5fc74ab22cebf835c1924478067f8a2abc3b 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 9f51f04208aaccca281398b0382718f2165d8a0e..47ebe6a8f0acf26021a84f37bf76d703aeba1374 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -20,10 +20,7 @@ use super::*; use crate as pallet_atomic_swap; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU32}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -43,20 +40,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); 
- type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 9264d2f4a643cdedd02fa17f81f90d949c2c0729..97582e7d2e9dbf110a1c22b5de6ad5e592acb980 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-timestamp = { path = "../timestamp", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index c21f9b5c904556f8a0c01a0e5f1e468e32685ee5..79057d94107be2485c6bfb48ec9a20373b83e143 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -16,23 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-session = { path = "../session", default-features = false, features = [ +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { features = [ "historical", -] } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +sp-application-crypto = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = 
true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index dd78e3404ef0b3c30adfa5b24bd4444c2fec10d1..58292c0eba393d482740a7cd59a748b17658f8c4 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index d06b7f7454648ea529595aa91e1e3a0f9e2b5486..68638798e796a0a0cf67fd428216fb5b8114e95d 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -16,31 +16,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -pallet-timestamp = { path = "../timestamp", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true 
} +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core" } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 686ba6ec2d6344cdd49cac44f352d5fd76c45c36..9e16f1d09515688e0fc7226918d197dd86d9cb7e 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -96,11 +96,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: BlockNumberFor) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); + if Pallet::::should_epoch_change(now) { + let authorities = Authorities::::get(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities, None); + Pallet::::enact_epoch_change(authorities, next_authorities, None); } } } @@ -185,12 +185,10 @@ pub mod pallet { /// Current epoch index. #[pallet::storage] - #[pallet::getter(fn epoch_index)] pub type EpochIndex = StorageValue<_, u64, ValueQuery>; /// Current epoch authorities. #[pallet::storage] - #[pallet::getter(fn authorities)] pub type Authorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, @@ -200,12 +198,10 @@ pub mod pallet { /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. #[pallet::storage] - #[pallet::getter(fn genesis_slot)] pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; /// Current slot number. #[pallet::storage] - #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; /// The epoch randomness for the *current* epoch. @@ -222,20 +218,19 @@ pub mod pallet { // array size because the metadata API currently doesn't resolve the // variable to its underlying value. #[pallet::storage] - #[pallet::getter(fn randomness)] pub type Randomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Pending epoch configuration change that will be applied when the next epoch is enacted. #[pallet::storage] - pub(super) type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; + pub type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; /// Next epoch randomness. 
#[pallet::storage] - pub(super) type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = StorageValue< + pub type NextAuthorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, ValueQuery, @@ -251,11 +246,11 @@ pub mod pallet { /// We reset all segments and return to `0` at the beginning of every /// epoch. #[pallet::storage] - pub(super) type SegmentIndex = StorageValue<_, u32, ValueQuery>; + pub type SegmentIndex = StorageValue<_, u32, ValueQuery>; /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. #[pallet::storage] - pub(super) type UnderConstruction = StorageMap< + pub type UnderConstruction = StorageMap< _, Twox64Concat, u32, @@ -266,16 +261,14 @@ pub mod pallet { /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. #[pallet::storage] - #[pallet::getter(fn initialized)] - pub(super) type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, Option>; /// This field should always be populated during block processing unless /// secondary plain slots are enabled (which don't contain a VRF output). /// /// It is set in `on_finalize`, before it will contain the value from the last block. #[pallet::storage] - #[pallet::getter(fn author_vrf_randomness)] - pub(super) type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; + pub type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; /// The block numbers when the last and current epoch have started, respectively `N-1` and /// `N`. @@ -292,19 +285,17 @@ pub mod pallet { /// on block finalization. Querying this storage entry outside of block /// execution context should always yield zero. #[pallet::storage] - #[pallet::getter(fn lateness)] - pub(super) type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. #[pallet::storage] - #[pallet::getter(fn epoch_config)] - pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type EpochConfig = StorageValue<_, BabeEpochConfiguration>; /// The configuration for the next epoch, `None` if the config will not change /// (you can fallback to `EpochConfig` instead in that case). #[pallet::storage] - pub(super) type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; /// A list of the last 100 skipped epochs and the corresponding session index /// when the epoch was skipped. @@ -315,8 +306,7 @@ pub mod pallet { /// a validator was the owner of a given key on a given session, and what the /// active epoch index was during that session. 
#[pallet::storage] - #[pallet::getter(fn skipped_epochs)] - pub(super) type SkippedEpochs = + pub type SkippedEpochs = StorageValue<_, BoundedVec<(u64, SessionIndex), ConstU32<100>>, ValueQuery>; #[derive(frame_support::DefaultNoBound)] @@ -368,7 +358,7 @@ pub mod pallet { .and_then(|(authority, _)| { let public = authority.as_inner_ref(); let transcript = sp_consensus_babe::make_vrf_transcript( - &Self::randomness(), + &Randomness::::get(), CurrentSlot::::get(), EpochIndex::::get(), ); @@ -510,7 +500,7 @@ impl FindAuthor for Pallet { impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities().iter().any(|id| &id.0 == authority_id) + Authorities::::get().iter().any(|id| &id.0 == authority_id) } } @@ -526,6 +516,47 @@ impl pallet_session::ShouldEndSession> for Pallet Pallet { + /// Public function to access epoch_index storage. + pub fn epoch_index() -> u64 { + EpochIndex::::get() + } + /// Public function to access authorities storage. + pub fn authorities() -> WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities> { + Authorities::::get() + } + /// Public function to access genesis_slot storage. + pub fn genesis_slot() -> Slot { + GenesisSlot::::get() + } + /// Public function to access current_slot storage. + pub fn current_slot() -> Slot { + CurrentSlot::::get() + } + /// Public function to access randomness storage. + pub fn randomness() -> BabeRandomness { + Randomness::::get() + } + /// Public function to access initialized storage. + pub fn initialized() -> Option> { + Initialized::::get() + } + /// Public function to access author_vrf_randomness storage. + pub fn author_vrf_randomness() -> Option { + AuthorVrfRandomness::::get() + } + /// Public function to access lateness storage. + pub fn lateness() -> BlockNumberFor { + Lateness::::get() + } + /// Public function to access epoch_config storage. + pub fn epoch_config() -> Option { + EpochConfig::::get() + } + /// Public function to access skipped_epochs storage. + pub fn skipped_epochs() -> BoundedVec<(u64, SessionIndex), ConstU32<100>> { + SkippedEpochs::::get() + } + /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within @@ -588,7 +619,7 @@ impl Pallet { ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. - debug_assert!(Self::initialized().is_some()); + debug_assert!(Initialized::::get().is_some()); if authorities.is_empty() { log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); @@ -655,7 +686,7 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update the start blocks of the previous and new current epoch. 
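The BABE changes above remove the `#[pallet::getter(..)]` attributes, make the storage items `pub`, and keep hand-written accessors so older call sites (`Babe::authorities()` and friends) still compile while new code reads storage directly via `Authorities::<T>::get()`. A minimal sketch of that refactoring shape outside FRAME, using a plain struct and method purely for illustration:

```rust
/// A stand-in "pallet" that keeps its state public and offers explicit
/// accessors instead of macro-generated getters.
struct Pallet {
    /// Previously only reachable through a generated getter; now public so
    /// callers can read it directly (the `Authorities::<T>::get()` analogue).
    pub authorities: Vec<u64>,
}

impl Pallet {
    /// Explicit accessor kept for call sites written against the old
    /// getter-style API (the `Babe::authorities()` analogue).
    pub fn authorities(&self) -> &[u64] {
        &self.authorities
    }
}

fn main() {
    let pallet = Pallet { authorities: vec![1, 2, 3] };
    // Both access paths observe the same state.
    assert_eq!(pallet.authorities(), pallet.authorities.as_slice());
}
```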
- >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { + EpochStart::::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); *current_epoch_start_block = >::block_number(); }); @@ -701,8 +732,8 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), } @@ -779,8 +810,8 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), }; Self::deposit_consensus(ConsensusLog::NextEpochData(next)); @@ -789,7 +820,7 @@ impl Pallet { fn initialize(now: BlockNumberFor) { // since `initialize` can be called twice (e.g. if session module is present) // let's ensure that we only do the initialization once per block - let initialized = Self::initialized().is_some(); + let initialized = Initialized::::get().is_some(); if initialized { return } @@ -940,7 +971,7 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness> for Pallet { fn lateness(&self) -> BlockNumberFor { - Self::lateness() + Lateness::::get() } } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 395a86e6528807ff21ee2667d1446016b6826a1e..e193a2e3b645446906b7c7b328faafaa59c382b8 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -28,7 +28,6 @@ use frame_support::{ traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; -use pallet_staking::FixedNominationsQuota; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{KeyTypeId, Pair, VrfSecret}, @@ -112,20 +111,11 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { @@ -142,7 +132,6 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; - pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -157,35 +146,20 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { @@ -239,7 +213,7 @@ pub fn go_to_block(n: u64, s: u64) { /// Slots will grow accordingly to blocks pub fn progress_to_block(n: u64) { - let mut slot = u64::from(Babe::current_slot()) + 1; + let mut slot = u64::from(CurrentSlot::::get()) + 1; for i in System::block_number() + 1..=n { go_to_block(i, slot); slot += 1; @@ -298,7 +272,8 @@ pub fn make_vrf_signature_and_randomness( slot: Slot, pair: &sp_consensus_babe::AuthorityPair, ) -> (VrfSignature, Randomness) { - let transcript = sp_consensus_babe::make_vrf_transcript(&Babe::randomness(), slot, 0); + let transcript = + sp_consensus_babe::make_vrf_transcript(&pallet_babe::Randomness::::get(), slot, 0); let randomness = pair.as_ref().make_bytes(sp_consensus_babe::RANDOMNESS_VRF_CONTEXT, &transcript); diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index e65f1844f88f9ffd9dc4e0a5c0f81e4f5e1c9459..b9a214ca105c846fae4be018149c7aba55c521e8 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -43,7 +43,7 @@ fn empty_randomness_is_correct() { #[test] fn initial_values() { - new_test_ext(4).execute_with(|| assert_eq!(Babe::authorities().len(), 4)) + new_test_ext(4).execute_with(|| assert_eq!(Authorities::::get().len(), 4)) } #[test] @@ -68,25 +68,25 @@ fn first_block_epoch_zero_start() { let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_signature); - assert_eq!(Babe::genesis_slot(), Slot::from(0)); + assert_eq!(GenesisSlot::::get(), Slot::from(0)); System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); // see implementation of the function for details why: we issue an // epoch-change digest but don't do it via the normal session mechanism. 
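Several mock runtimes in this diff (assets, atomic-swap, babe) switch to `#[derive_impl(.. TestDefaultConfig)]`, keeping only the associated items the test actually cares about and inheriting the rest. A rough sketch of that defaults-plus-overrides shape using a plain trait with default associated consts; this is not the `derive_impl` macro itself, just an approximation of what it buys:

```rust
/// The configuration surface a mock has to provide.
trait Config {
    type Balance;
    const EXISTENTIAL_DEPOSIT: u128;
    const MAX_LOCKS: u32;
}

/// What a "test default config" supplies: sensible defaults for everything.
/// A mock then only overrides the handful of items it actually cares about.
trait TestDefaults {
    const EXISTENTIAL_DEPOSIT: u128 = 1;
    const MAX_LOCKS: u32 = 50;
}

struct Test;
impl TestDefaults for Test {}

impl Config for Test {
    // The only deliberate choice in this mock; everything else comes from the
    // defaults, mirroring `#[derive_impl(...)]` plus a couple of overrides.
    type Balance = u128;
    const EXISTENTIAL_DEPOSIT: u128 = <Test as TestDefaults>::EXISTENTIAL_DEPOSIT;
    const MAX_LOCKS: u32 = <Test as TestDefaults>::MAX_LOCKS;
}

fn main() {
    assert_eq!(<Test as Config>::EXISTENTIAL_DEPOSIT, 1);
    assert_eq!(<Test as Config>::MAX_LOCKS, 50);
}
```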
assert!(!Babe::should_end_session(1)); - assert_eq!(Babe::genesis_slot(), genesis_slot); - assert_eq!(Babe::current_slot(), genesis_slot); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(GenesisSlot::::get(), genesis_slot); + assert_eq!(CurrentSlot::::get(), genesis_slot); + assert_eq!(EpochIndex::::get(), 0); Babe::on_finalize(1); let header = System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); assert_eq!(SegmentIndex::::get(), 0); assert_eq!(UnderConstruction::::get(0), vec![vrf_randomness]); - assert_eq!(Babe::randomness(), [0; 32]); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(Randomness::::get(), [0; 32]); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(header.digest.logs.len(), 2); @@ -95,8 +95,8 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities().into_inner(), - randomness: Babe::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), }, ); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); @@ -118,19 +118,19 @@ fn current_slot_is_processed_on_initialization() { System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); - assert_eq!(Babe::current_slot(), Slot::from(0)); - assert!(Babe::initialized().is_none()); + assert_eq!(CurrentSlot::::get(), Slot::from(0)); + assert!(Initialized::::get().is_none()); // current slot is updated on initialization Babe::initialize(1); - assert_eq!(Babe::current_slot(), genesis_slot); - assert!(Babe::initialized().is_some()); + assert_eq!(CurrentSlot::::get(), genesis_slot); + assert!(Initialized::::get().is_some()); // but author vrf randomness isn't - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); // instead it is updated on block finalization Babe::on_finalize(1); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); }) } @@ -151,16 +151,16 @@ where // author vrf randomness is not updated on initialization Babe::initialize(1); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); // instead it is updated on block finalization to account for any // epoch changes that might happen during the block Babe::on_finalize(1); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); // and it is kept after finalizing the block System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); }) } @@ -182,14 +182,14 @@ fn no_author_vrf_output_for_secondary_plain() { System::reset_events(); System::initialize(&1, &Default::default(), &secondary_plain_pre_digest); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); Babe::initialize(1); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); Babe::on_finalize(1); System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); }) } @@ -210,14 +210,14 @@ fn 
can_predict_next_epoch_change() { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(*Babe::genesis_slot(), 6); - assert_eq!(*Babe::current_slot(), 6); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(*GenesisSlot::::get(), 6); + assert_eq!(*CurrentSlot::::get(), 6); + assert_eq!(EpochIndex::::get(), 0); progress_to_block(5); - assert_eq!(Babe::epoch_index(), 5 / 3); - assert_eq!(*Babe::current_slot(), 10); + assert_eq!(EpochIndex::::get(), 5 / 3); + assert_eq!(*CurrentSlot::::get(), 10); // next epoch change will be at assert_eq!(*Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now @@ -266,9 +266,9 @@ fn can_enact_next_config() { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(*Babe::genesis_slot(), 6); - assert_eq!(*Babe::current_slot(), 6); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(*GenesisSlot::::get(), 6); + assert_eq!(*CurrentSlot::::get(), 6); + assert_eq!(EpochIndex::::get(), 0); go_to_block(2, 7); let current_config = BabeEpochConfiguration { @@ -431,7 +431,7 @@ fn report_equivocation_current_session_works() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); let validators = Session::validators(); // make sure that all authorities have the same balance @@ -508,7 +508,7 @@ fn report_equivocation_old_session_works() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 1; @@ -566,7 +566,7 @@ fn report_equivocation_invalid_key_owner_proof() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 0; @@ -629,7 +629,7 @@ fn report_equivocation_invalid_equivocation_proof() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 0; @@ -734,7 +734,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // generate and report an equivocation for the validator at index 0 let offending_validator_index = 0; @@ -848,7 +848,7 @@ fn report_equivocation_after_skipped_epochs_works() { assert_eq!(SkippedEpochs::::get(), vec![(10, 1)]); // generate an equivocation proof for validator at index 1 - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); let offending_validator_index = 1; let offending_authority_pair = pairs .into_iter() diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 3429d2f28a6cc7445e383a28e5f817b876251f2b..bf57bd2af53cef42fda4c963b327f52aa08edb89 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -16,41 +16,41 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ 
"derive", -] } +], workspace = true } # primitives -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # FRAME -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-election-provider-support = { workspace = true } # third party log = { workspace = true } -docify = "0.2.8" -aquamarine = { version = "0.5.0" } +docify = { workspace = true } +aquamarine = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../balances", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false, optional = true } -sp-tracing = { path = "../../primitives/tracing", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-balances = { optional = true, workspace = true } +sp-core = { optional = true, workspace = true } +sp-io = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-balances = { path = "../balances" } -frame-election-provider-support = { path = "../election-provider-support" } -frame-benchmarking = { path = "../benchmarking" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index 20760141b23612bee46b8a79ce40874dfdef8a7b..f8631be159cd42a80a9ea49eb3de0a953cf843bb 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -13,10 +13,10 @@ publish = false workspace = true [dependencies] -honggfuzz = "0.5" -rand = { version = "0.8", features = ["small_rng", "std"] } -frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"] } -pallet-bags-list = { path = "..", features = ["fuzz"] } +honggfuzz = { workspace = true } +rand = { features = ["small_rng", "std"], workspace = true, default-features = true } +frame-election-provider-support = { features = ["fuzz"], workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index 266355f5cabe19214643db024e641339acd60630..45f2498aa88b4f6b6f4b4b83fa7b3e7eb5275d13 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ 
b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -17,21 +17,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # frame -pallet-staking = { path = "../../staking" } -pallet-bags-list = { path = "..", features = ["fuzz"] } -frame-election-provider-support = { path = "../../election-provider-support" } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } +pallet-staking = { workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } # core -sp-storage = { path = "../../../primitives/storage" } -sp-core = { path = "../../../primitives/core" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-std = { path = "../../../primitives/std" } +sp-storage = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } # utils -remote-externalities = { package = "frame-remote-externalities", path = "../../../utils/frame/remote-externalities" } +remote-externalities = { workspace = true, default-features = true } # others log = { workspace = true, default-features = true } diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index cd39b083172670f0f8f7450ffc546b6fda187313..e5fff76d75c70bf0cec47f1083a34a3ed6f65fd6 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -777,7 +777,8 @@ mod bags { assert_eq!(bag_1000.tail, Some(4)); assert_eq!(bag_1000.iter().count(), 3); bag_1000.insert_node_unchecked(node(4, None, None, bag_1000.bag_upper)); // panics in debug - assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the request. + assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the + // request. 
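The reflowed comment above describes `insert_node_unchecked` panicking in debug builds and silently ignoring the duplicate in release, which is the usual `debug_assert!` pattern. A tiny illustration with a hypothetical `insert_unchecked` helper (not the bags-list API):

```rust
/// Inserts `value` assuming the caller has already checked it is not present.
/// With debug assertions enabled (the default dev/test profile) a violated
/// precondition panics; in a release build the call degrades to a no-op.
fn insert_unchecked(list: &mut Vec<u32>, value: u32) {
    debug_assert!(!list.contains(&value), "duplicate insertion");
    if list.contains(&value) {
        // Release-mode fallback: ignore the request instead of corrupting state.
        return;
    }
    list.push(value);
}

fn main() {
    let mut list = vec![1, 2, 3];
    insert_unchecked(&mut list, 4);
    assert_eq!(list, vec![1, 2, 3, 4]);
    // Calling `insert_unchecked(&mut list, 4)` again would panic under debug
    // assertions and leave the list untouched in a release build.
}
```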
}); } diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 4da14aea12809f09c797d0eb0925e1c43bfdc8d6..5fa8fa84bf59e2401fba74f17f9c838f6cf3775a 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.8" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +docify = { workspace = true } [dev-dependencies] -pallet-transaction-payment = { path = "../transaction-payment" } -frame-support = { path = "../support", features = ["experimental"] } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -paste = "1.0.12" +pallet-transaction-payment = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 56eb81b49e2db695abbb8d705db7cef21f599416..d01884293c0902ea66636877d1b3d65fbd74f5a1 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -158,6 +158,7 @@ use frame_support::{ tokens::{ fungible, BalanceStatus as Status, DepositConsequence, Fortitude::{self, Force, Polite}, + IdAmount, Preservation::{Expendable, Preserve, Protect}, WithdrawConsequence, }, @@ -177,8 +178,7 @@ use sp_runtime::{ }; use sp_std::{cmp, fmt::Debug, mem, prelude::*, result}; pub use types::{ - AccountData, AdjustmentDirection, BalanceLock, DustCleaner, ExtraFlags, IdAmount, Reasons, - ReserveData, + AccountData, AdjustmentDirection, BalanceLock, DustCleaner, ExtraFlags, Reasons, ReserveData, }; pub use weights::WeightInfo; @@ -222,13 +222,13 @@ pub mod pallet { type ExistentialDeposit = ConstU64<1>; type ReserveIdentifier = (); - type FreezeIdentifier = (); + type FreezeIdentifier = Self::RuntimeFreezeReason; type DustRemoval = (); type MaxLocks = ConstU32<100>; type MaxReserves = ConstU32<100>; - type MaxFreezes = ConstU32<100>; + type MaxFreezes = VariantCountOf; type WeightInfo = (); } diff --git a/substrate/frame/balances/src/tests/dispatchable_tests.rs b/substrate/frame/balances/src/tests/dispatchable_tests.rs index 4bc96f6b43d97444252f42e4820d97168ed50e1e..ebc9f1b1a3695cb446e64018acc2bca772169b29 100644 --- a/substrate/frame/balances/src/tests/dispatchable_tests.rs +++ b/substrate/frame/balances/src/tests/dispatchable_tests.rs @@ 
-281,7 +281,7 @@ fn force_adjust_total_issuance_saturates() { ExtBuilder::default().build_and_execute_with(|| { assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1337, 64)); let ti = Balances::total_issuance(); - let max = Balance::max_value(); + let max = ::Balance::max_value(); assert_eq!(ti, 64); // Increment saturates: diff --git a/substrate/frame/balances/src/tests/fungible_tests.rs b/substrate/frame/balances/src/tests/fungible_tests.rs index 52fbe10bedec0f7131fe35fa7e6f809968741d42..1a09303a6590d54b3bb94ffb1d870d5c1e687b24 100644 --- a/substrate/frame/balances/src/tests/fungible_tests.rs +++ b/substrate/frame/balances/src/tests/fungible_tests.rs @@ -18,13 +18,20 @@ //! Tests regarding the functionality of the `fungible` trait set implementations. use super::*; -use frame_support::traits::tokens::{ - Fortitude::{Force, Polite}, - Precision::{BestEffort, Exact}, - Preservation::{Expendable, Preserve, Protect}, - Restriction::Free, +use frame_support::traits::{ + tokens::{ + Fortitude::{Force, Polite}, + Precision::{BestEffort, Exact}, + Preservation::{Expendable, Preserve, Protect}, + Restriction::Free, + }, + Consideration, Footprint, LinearStoragePrice, }; -use fungible::{Inspect, InspectFreeze, InspectHold, Mutate, MutateFreeze, MutateHold, Unbalanced}; +use fungible::{ + FreezeConsideration, HoldConsideration, Inspect, InspectFreeze, InspectHold, + LoneFreezeConsideration, LoneHoldConsideration, Mutate, MutateFreeze, MutateHold, Unbalanced, +}; +use sp_core::ConstU64; #[test] fn inspect_trait_reducible_balance_basic_works() { @@ -493,3 +500,161 @@ fn withdraw_precision_exact_works() { ); }); } + +#[test] +fn freeze_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = FreezeConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + // freeze amount taken somewhere outside of our (Consideration) scope. + let extend_freeze = 15; + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); + + assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, extend_freeze)); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4 + extend_freeze); + + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 8 + extend_freeze); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0 + extend_freeze); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10 + extend_freeze); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0 + extend_freeze); + }); +} + +#[test] +fn hold_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = HoldConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + // hold amount taken somewhere outside of our (Consideration) scope. 
+ let extend_hold = 15; + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); + + assert_ok!(Balances::hold(&TestId::Foo, &who, extend_hold)); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4 + extend_hold); + + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 8 + extend_hold); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0 + extend_hold); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10 + extend_hold); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0 + extend_hold); + }); +} + +#[test] +fn lone_freeze_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = LoneFreezeConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, 5)); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 15); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + }); +} + +#[test] +fn lone_hold_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = LoneHoldConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + assert_ok!(Balances::hold(&TestId::Foo, &who, 5)); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 15); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, 
&who), 0); + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 0abf2251290fee6881f0346aaab7df37ff2f0efb..ba0cdabdabbbdc8459fcb551ae3a1b704d479262 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -27,7 +27,7 @@ use frame_support::{ parameter_types, traits::{ fungible, ConstU32, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, StorageMapShim, - StoredMap, VariantCount, WhitelistedStorageKeys, + StoredMap, VariantCount, VariantCountOf, WhitelistedStorageKeys, }, weights::{IdentityFee, Weight}, }; @@ -107,22 +107,21 @@ impl pallet_transaction_payment::Config for Test { type FeeMultiplierUpdate = (); } -pub(crate) type Balance = u64; +parameter_types! { + pub FooReason: TestId = TestId::Foo; +} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl Config for Test { - type Balance = Balance; type DustRemoval = DustTrap; - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = TestAccountStore; - type MaxLocks = ConstU32<50>; type MaxReserves = ConstU32<2>; type ReserveIdentifier = TestId; - type WeightInfo = (); type RuntimeHoldReason = TestId; - type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeFreezeReason = TestId; type FreezeIdentifier = TestId; - type MaxFreezes = ConstU32<2>; + type MaxFreezes = VariantCountOf; } #[derive(Clone)] diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs index 3e36a83575c892812d687e5882784b023754fe44..917b7507d7c941156a080163bf029f468c75b097 100644 --- a/substrate/frame/balances/src/types.rs +++ b/substrate/frame/balances/src/types.rs @@ -78,15 +78,6 @@ pub struct ReserveData { pub amount: Balance, } -/// An identifier and balance. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct IdAmount { - /// An identifier for this item. - pub id: Id, - /// Some amount for this item. - pub amount: Balance, -} - /// All balance information for an account. 
#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub struct AccountData { diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 51abc306265d60a3ffc990fb6878831e12593a60..b46998a857426fb3edfa8affaea702cffc2f4732 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -12,28 +12,28 @@ homepage = "https://substrate.io" workspace = true [dependencies] -array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +array-bytes = { optional = true, workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-beefy = { path = "../beefy", default-features = false } -pallet-mmr = { path = "../merkle-mountain-range", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +binary-merkle-tree = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-beefy = { workspace = true } +pallet-mmr = { workspace = true } +pallet-session = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +sp-state-machine = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-staking = { path = "../../primitives/staking" } +array-bytes = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index e423f1b342f2fc4faa9095c1cecf32556d0fd3cd..18ebc9d8f38a765153bbd2cde73265ee1e87f480 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -33,20 +33,22 @@ //! //! and thanks to versioning can be easily updated in the future. 
-use sp_runtime::traits::{Convert, Member}; +use sp_runtime::traits::{Convert, Header, Member}; use sp_std::prelude::*; use codec::Decode; -use pallet_mmr::{LeafDataProvider, ParentNumberAndHash}; +use pallet_mmr::{primitives::AncestryProof, LeafDataProvider, ParentNumberAndHash}; use sp_consensus_beefy::{ + known_payloads, mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, - ValidatorSet as BeefyValidatorSet, + AncestryHelper, Commitment, ConsensusLog, ValidatorSet as BeefyValidatorSet, }; use frame_support::{crypto::ecdsa::ECDSAExt, traits::Get}; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; pub use pallet::*; +use sp_runtime::generic::OpaqueDigestItemId; #[cfg(test)] mod mock; @@ -172,6 +174,75 @@ where } } +impl AncestryHelper> for Pallet +where + T: pallet_mmr::Config, +{ + type Proof = AncestryProof>; + type ValidationContext = MerkleRootOf; + + fn extract_validation_context(header: HeaderFor) -> Option { + // Check if the provided header is canonical. + let expected_hash = frame_system::Pallet::::block_hash(header.number()); + if expected_hash != header.hash() { + return None; + } + + // Extract the MMR root from the header digest + header.digest().convert_first(|l| { + l.try_to(OpaqueDigestItemId::Consensus(&sp_consensus_beefy::BEEFY_ENGINE_ID)) + .and_then(|log: ConsensusLog<::BeefyId>| match log { + ConsensusLog::MmrRoot(mmr_root) => Some(mmr_root), + _ => None, + }) + }) + } + + fn is_non_canonical( + commitment: &Commitment>, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool { + let commitment_leaf_count = + match pallet_mmr::Pallet::::block_num_to_leaf_count(commitment.block_number) { + Ok(commitment_leaf_count) => commitment_leaf_count, + Err(_) => { + // We can't prove that the commitment is non-canonical if the + // `commitment.block_number` is invalid. + return false + }, + }; + if commitment_leaf_count != proof.prev_leaf_count { + // Can't prove that the commitment is non-canonical if the `commitment.block_number` + // doesn't match the ancestry proof. + return false; + } + + let canonical_mmr_root = context; + let canonical_prev_root = + match pallet_mmr::Pallet::::verify_ancestry_proof(canonical_mmr_root, proof) { + Ok(canonical_prev_root) => canonical_prev_root, + Err(_) => { + // Can't prove that the commitment is non-canonical if the proof + // is invalid. + return false + }, + }; + + let commitment_root = + match commitment.payload.get_decoded::>(&known_payloads::MMR_ROOT_ID) { + Some(commitment_root) => commitment_root, + None => { + // If the commitment doesn't contain any MMR root, while the proof is valid, + // the commitment is invalid + return true + }, + }; + + canonical_prev_root != commitment_root + } +} + impl Pallet { /// Return the currently active BEEFY authority set proof. 
pub fn authority_set_proof() -> BeefyAuthoritySet> { diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs index d59c219d3e71eae1c38975fc8bec137cc27b9076..0521bdabbe4958c99aebceef48a4a4a8b031cb6a 100644 --- a/substrate/frame/beefy-mmr/src/mock.rs +++ b/substrate/frame/beefy-mmr/src/mock.rs @@ -101,6 +101,7 @@ impl pallet_beefy::Config for Test { type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = ConstU64<100>; type OnNewValidatorSet = BeefyMmr; + type AncestryHelper = BeefyMmr; type WeightInfo = (); type KeyOwnerProof = sp_core::Void; type EquivocationReportSystem = (); diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index fac799bf64e430c0908602697cc8f97f0a8c76c6..f99835a1dc0a5fc2535cdb6cd457351770978987 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -19,11 +19,15 @@ use std::vec; use codec::{Decode, Encode}; use sp_consensus_beefy::{ + known_payloads, mmr::{BeefyNextAuthoritySet, MmrLeafVersion}, - ValidatorSet, + AncestryHelper, Commitment, Payload, ValidatorSet, }; -use sp_core::H256; +use sp_core::{ + offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, + H256, +}; use sp_io::TestExternalities; use sp_runtime::{traits::Keccak256, DigestItem}; @@ -31,8 +35,9 @@ use frame_support::traits::OnInitialize; use crate::mock::*; -fn init_block(block: u64) { - System::set_block_number(block); +fn init_block(block: u64, maybe_parent_hash: Option) { + let parent_hash = maybe_parent_hash.unwrap_or(H256::repeat_byte(block as u8)); + System::initialize(&block, &parent_hash, &Default::default()); Session::on_initialize(block); Mmr::on_initialize(block); Beefy::on_initialize(block); @@ -61,38 +66,32 @@ fn read_mmr_leaf(ext: &mut TestExternalities, key: Vec) -> MmrLeaf { fn should_contain_mmr_digest() { let mut ext = new_test_ext(vec![1, 2, 3, 4]); ext.execute_with(|| { - init_block(1); - + init_block(1, None); assert_eq!( System::digest().logs, vec![ beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))) + beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&[ + 117, 0, 56, 25, 185, 195, 71, 232, 67, 213, 27, 178, 64, 168, 137, 220, 64, + 184, 64, 240, 83, 245, 18, 93, 185, 202, 125, 205, 17, 254, 18, 143 + ]))) ] ); // unique every time - init_block(2); - + init_block(2, None); assert_eq!( System::digest().logs, vec![ - beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() - )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))), beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(3), mock_beefy_id(4)], 2).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" - ))), + beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&[ + 193, 246, 48, 7, 89, 204, 186, 109, 167, 226, 188, 211, 8, 243, 203, 154, 234, + 235, 136, 210, 245, 7, 209, 27, 241, 90, 156, 113, 137, 65, 191, 139 + ]))), ] ); }); @@ -106,7 +105,7 @@ fn should_contain_valid_leaf_data() { let mut ext = new_test_ext(vec![1, 2, 3, 4]); let parent_hash = ext.execute_with(|| { - init_block(1); + init_block(1, 
None); frame_system::Pallet::::parent_hash() }); @@ -115,7 +114,7 @@ fn should_contain_valid_leaf_data() { mmr_leaf, MmrLeaf { version: MmrLeafVersion::new(1, 5), - parent_number_and_hash: (0_u64, H256::repeat_byte(0x45)), + parent_number_and_hash: (0_u64, H256::repeat_byte(1)), beefy_next_authority_set: BeefyNextAuthoritySet { id: 2, len: 2, @@ -131,7 +130,7 @@ fn should_contain_valid_leaf_data() { // build second block on top let parent_hash = ext.execute_with(|| { - init_block(2); + init_block(2, None); frame_system::Pallet::::parent_hash() }); @@ -140,7 +139,7 @@ fn should_contain_valid_leaf_data() { mmr_leaf, MmrLeaf { version: MmrLeafVersion::new(1, 5), - parent_number_and_hash: (1_u64, H256::repeat_byte(0x45)), + parent_number_and_hash: (1_u64, H256::repeat_byte(2)), beefy_next_authority_set: BeefyNextAuthoritySet { id: 3, len: 2, @@ -175,7 +174,7 @@ fn should_update_authorities() { assert_eq!(auth_set.keyset_commitment, next_auth_set.keyset_commitment); let announced_set = next_auth_set; - init_block(1); + init_block(1, None); let auth_set = BeefyMmr::authority_set_proof(); let next_auth_set = BeefyMmr::next_authority_set_proof(); @@ -191,7 +190,7 @@ fn should_update_authorities() { assert_eq!(want, next_auth_set.keyset_commitment); let announced_set = next_auth_set; - init_block(2); + init_block(2, None); let auth_set = BeefyMmr::authority_set_proof(); let next_auth_set = BeefyMmr::next_authority_set_proof(); @@ -207,3 +206,176 @@ fn should_update_authorities() { assert_eq!(want, next_auth_set.keyset_commitment); }); } + +#[test] +fn extract_validation_context_should_work_correctly() { + let mut ext = new_test_ext(vec![1, 2]); + + // Register offchain ext. + let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + + ext.execute_with(|| { + init_block(1, None); + let h1 = System::finalize(); + init_block(2, Some(h1.hash())); + let h2 = System::finalize(); + + // Check the MMR root log + let expected_mmr_root: [u8; 32] = array_bytes::hex_n_into_unchecked( + "b2106eff9894288bc212b3a9389caa54efd37962c3a7b71b3b0b06a0911b88a5", + ); + assert_eq!( + System::digest().logs, + vec![beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&expected_mmr_root)))] + ); + + // Make sure that all the info about h2 was stored on-chain + init_block(3, Some(h2.hash())); + + // `extract_validation_context` should return the MMR root when the provided header + // is part of the chain, + assert_eq!( + BeefyMmr::extract_validation_context(h2.clone()), + Some(H256::from_slice(&expected_mmr_root)) + ); + + // `extract_validation_context` should return `None` when the provided header + // is not part of the chain. + let mut fork_h2 = h2; + fork_h2.state_root = H256::repeat_byte(0); + assert_eq!(BeefyMmr::extract_validation_context(fork_h2), None); + }); +} + +#[test] +fn is_non_canonical_should_work_correctly() { + let mut ext = new_test_ext(vec![1, 2]); + + let mut prev_roots = vec![]; + ext.execute_with(|| { + for block_num in 1..=500 { + init_block(block_num, None); + prev_roots.push(Mmr::mmr_root()) + } + }); + ext.persist_offchain_overlay(); + + // Register offchain ext. 
+ let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + + ext.execute_with(|| { + let valid_proof = Mmr::generate_ancestry_proof(250, None).unwrap(); + let mut invalid_proof = valid_proof.clone(); + invalid_proof.items.push((300, Default::default())); + + // The commitment is invalid if it has no MMR root payload and the proof is valid. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry([0, 0], vec![]), + block_number: 250, + validator_set_id: 0 + }, + valid_proof.clone(), + Mmr::mmr_root(), + ), + true + ); + + // If the `commitment.payload` contains an MMR root that doesn't match the ancestry proof, + // it's non-canonical. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: 250, + validator_set_id: 0, + }, + valid_proof.clone(), + Mmr::mmr_root(), + ), + true + ); + + // Should return false if the proof is invalid, no matter the payload. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: 250, + validator_set_id: 0 + }, + invalid_proof, + Mmr::mmr_root(), + ), + false + ); + + // Can't prove that the commitment is non-canonical if the `commitment.block_number` + // doesn't match the ancestry proof. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + prev_roots[250 - 1].encode(), + ), + block_number: 300, + validator_set_id: 0, + }, + valid_proof, + Mmr::mmr_root(), + ), + false + ); + + // For each previous block, the check: + // - should return false, if the commitment is targeting the canonical chain + // - should return true if the commitment is NOT targeting the canonical chain + for prev_block_number in 1usize..=500 { + let proof = Mmr::generate_ancestry_proof(prev_block_number as u64, None).unwrap(); + + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + prev_roots[prev_block_number - 1].encode(), + ), + block_number: prev_block_number as u64, + validator_set_id: 0, + }, + proof.clone(), + Mmr::mmr_root(), + ), + false + ); + + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: prev_block_number as u64, + validator_set_id: 0, + }, + proof, + Mmr::mmr_root(), + ), + true + ) + } + }); +} diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 890ac1399b9dfcee8d71348d8eac9ff7b3ae2546..8ce3a05985be7dfa383b91c187451f3a4753efd6 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -12,31 +12,31 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true } serde = { optional = true, workspace = true, default-features = 
true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +sp-consensus-beefy = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -pallet-timestamp = { path = "../timestamp" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-staking = { path = "../../primitives/staking" } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-state-machine = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/beefy/src/default_weights.rs b/substrate/frame/beefy/src/default_weights.rs index 8042f0c932eb6603c88e1ff5f4525743c347b9bb..70dd3bb02bf1e7f4ea8e6946329dafa207114ca9 100644 --- a/substrate/frame/beefy/src/default_weights.rs +++ b/substrate/frame/beefy/src/default_weights.rs @@ -24,7 +24,11 @@ use frame_support::weights::{ }; impl crate::WeightInfo for () { - fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight { + fn report_voting_equivocation( + votes_count: u32, + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. 
let validator_count = validator_count.max(100) as u64; @@ -37,7 +41,10 @@ impl crate::WeightInfo for () { ) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(Weight::from_parts(95u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) + .saturating_add(Weight::from_parts( + (50u64 * WEIGHT_REF_TIME_PER_MICROS).saturating_mul(votes_count as u64), + 0, + )) // report offence .saturating_add(Weight::from_parts(110u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) .saturating_add(Weight::from_parts( @@ -50,6 +57,11 @@ impl crate::WeightInfo for () { .saturating_add(DbWeight::get().reads(2)) } + // TODO: Calculate + fn report_fork_voting(_validator_count: u32, _max_nominators_per_validator: u32) -> Weight { + Weight::MAX + } + fn set_new_genesis() -> Weight { DbWeight::get().writes(1) } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index aecc9e721d5c43c938b744b8682f59e96b472970..a1526e7811111366b6f4a3012529f7e81df0e5db 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -36,9 +36,12 @@ use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; use log::{error, info}; -use sp_consensus_beefy::{DoubleVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; +use sp_consensus_beefy::{ + check_commitment_signature, AncestryHelper, DoubleVotingProof, ForkVotingProof, + FutureBlockVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE, +}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -118,18 +121,143 @@ where /// `offchain::SendTransactionTypes`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. -/// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. +/// - Offence reporter for unsigned transactions is fetched via the authorship pallet. pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, R, P, L)>); /// Equivocation evidence convenience alias. -pub type EquivocationEvidenceFor = ( - DoubleVotingProof< - BlockNumberFor, - ::BeefyId, - <::BeefyId as RuntimeAppPublic>::Signature, - >, - ::KeyOwnerProof, -); +pub enum EquivocationEvidenceFor { + DoubleVotingProof( + DoubleVotingProof< + BlockNumberFor, + T::BeefyId, + ::Signature, + >, + T::KeyOwnerProof, + ), + ForkVotingProof( + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + T::KeyOwnerProof, + ), + FutureBlockVotingProof(FutureBlockVotingProof, T::BeefyId>, T::KeyOwnerProof), +} + +impl EquivocationEvidenceFor { + /// Returns the authority id of the equivocator. + fn offender_id(&self) -> &T::BeefyId { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.offender_id(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.id, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.id, + } + } + + /// Returns the round number at which the equivocation occurred. 
+ fn round_number(&self) -> &BlockNumberFor { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.round_number(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.commitment.block_number, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.commitment.block_number, + } + } + + /// Returns the set id at which the equivocation occurred. + fn set_id(&self) -> ValidatorSetId { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.set_id(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + equivocation_proof.vote.commitment.validator_set_id, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + equivocation_proof.vote.commitment.validator_set_id, + } + } + + /// Returns the set id at which the equivocation occurred. + fn key_owner_proof(&self) -> &T::KeyOwnerProof { + match self { + EquivocationEvidenceFor::DoubleVotingProof(_, key_owner_proof) => key_owner_proof, + EquivocationEvidenceFor::ForkVotingProof(_, key_owner_proof) => key_owner_proof, + EquivocationEvidenceFor::FutureBlockVotingProof(_, key_owner_proof) => key_owner_proof, + } + } + + fn checked_offender
<P>
(&self) -> Option + where + P: KeyOwnerProofSystem<(KeyTypeId, T::BeefyId), Proof = T::KeyOwnerProof>, + { + let key = (BEEFY_KEY_TYPE, self.offender_id().clone()); + P::check_proof(key, self.key_owner_proof().clone()) + } + + fn check_equivocation_proof(self) -> Result<(), Error> { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => { + // Validate equivocation proof (check votes are different and signatures are valid). + if !sp_consensus_beefy::check_double_voting_proof(&equivocation_proof) { + return Err(Error::::InvalidDoubleVotingProof); + } + + return Ok(()) + }, + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => { + let ForkVotingProof { vote, ancestry_proof, header } = equivocation_proof; + + let maybe_validation_context = , + >>::extract_validation_context(header); + let validation_context = match maybe_validation_context { + Some(validation_context) => validation_context, + None => { + return Err(Error::::InvalidForkVotingProof); + }, + }; + + let is_non_canonical = + >>::is_non_canonical( + &vote.commitment, + ancestry_proof, + validation_context, + ); + if !is_non_canonical { + return Err(Error::::InvalidForkVotingProof); + } + + let is_signature_valid = + check_commitment_signature(&vote.commitment, &vote.id, &vote.signature); + if !is_signature_valid { + return Err(Error::::InvalidForkVotingProof); + } + + Ok(()) + }, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => { + let FutureBlockVotingProof { vote } = equivocation_proof; + // Check if the commitment actually targets a future block + if vote.commitment.block_number < frame_system::Pallet::::block_number() { + return Err(Error::::InvalidFutureBlockVotingProof); + } + + let is_signature_valid = + check_commitment_signature(&vote.commitment, &vote.id, &vote.signature); + if !is_signature_valid { + return Err(Error::::InvalidForkVotingProof); + } + + Ok(()) + }, + } + } +} impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem @@ -148,13 +276,8 @@ where fn publish_evidence(evidence: EquivocationEvidenceFor) -> Result<(), ()> { use frame_system::offchain::SubmitTransaction; - let (equivocation_proof, key_owner_proof) = evidence; - - let call = Call::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof), - key_owner_proof, - }; + let call: Call = evidence.into(); let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), @@ -166,18 +289,10 @@ where fn check_evidence( evidence: EquivocationEvidenceFor, ) -> Result<(), TransactionValidityError> { - let (equivocation_proof, key_owner_proof) = evidence; - - // Check the membership proof to extract the offender's id - let key = (BEEFY_KEY_TYPE, equivocation_proof.offender_id().clone()); - let offender = P::check_proof(key, key_owner_proof).ok_or(InvalidTransaction::BadProof)?; + let offender = evidence.checked_offender::
<P>
().ok_or(InvalidTransaction::BadProof)?; // Check if the offence has already been reported, and if so then we can discard the report. - let time_slot = TimeSlot { - set_id: equivocation_proof.set_id(), - round: *equivocation_proof.round_number(), - }; - + let time_slot = TimeSlot { set_id: evidence.set_id(), round: *evidence.round_number() }; if R::is_known_offence(&[offender], &time_slot) { Err(InvalidTransaction::Stale.into()) } else { @@ -189,47 +304,37 @@ where reporter: Option, evidence: EquivocationEvidenceFor, ) -> Result<(), DispatchError> { - let (equivocation_proof, key_owner_proof) = evidence; let reporter = reporter.or_else(|| pallet_authorship::Pallet::::author()); - let offender = equivocation_proof.offender_id().clone(); - - // We check the equivocation within the context of its set id (and - // associated session) and round. We also need to know the validator - // set count at the time of the offence since it is required to calculate - // the slash amount. - let set_id = equivocation_proof.set_id(); - let round = *equivocation_proof.round_number(); - let session_index = key_owner_proof.session(); - let validator_set_count = key_owner_proof.validator_count(); - // Validate the key ownership proof extracting the id of the offender. - let offender = P::check_proof((BEEFY_KEY_TYPE, offender), key_owner_proof) - .ok_or(Error::::InvalidKeyOwnershipProof)?; + // We check the equivocation within the context of its set id (and associated session). + let set_id = evidence.set_id(); + let round = *evidence.round_number(); + let set_id_session_index = crate::SetIdSession::::get(set_id) + .ok_or(Error::::InvalidEquivocationProofSession)?; - // Validate equivocation proof (check votes are different and signatures are valid). - if !sp_consensus_beefy::check_equivocation_proof(&equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()) - } - - // Check that the session id for the membership proof is within the - // bounds of the set id reported in the equivocation. - let set_id_session_index = - crate::SetIdSession::::get(set_id).ok_or(Error::::InvalidEquivocationProof)?; + // Check that the session id for the membership proof is within the bounds + // of the set id reported in the equivocation. + let key_owner_proof = evidence.key_owner_proof(); + let validator_count = key_owner_proof.validator_count(); + let session_index = key_owner_proof.session(); if session_index != set_id_session_index { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProofSession.into()) } + // Validate the key ownership proof extracting the id of the offender. + let offender = + evidence.checked_offender::
<P>
().ok_or(Error::::InvalidKeyOwnershipProof)?; + + evidence.check_equivocation_proof()?; + let offence = EquivocationOffence { time_slot: TimeSlot { set_id, round }, session_index, - validator_set_count, + validator_set_count: validator_count, offender, }; - R::report_offence(reporter.into_iter().collect(), offence) - .map_err(|_| Error::::DuplicateOffenceReport)?; - - Ok(()) + .map_err(|_| Error::::DuplicateOffenceReport.into()) } } @@ -239,49 +344,37 @@ where /// unsigned equivocation reports. impl Pallet { pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { - // discard equivocation report not coming from the local node - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, - _ => { - log::warn!( - target: LOG_TARGET, - "rejecting unsigned report equivocation transaction because it is not local/in-block." - ); - return InvalidTransaction::Call.into() - }, - } - - let evidence = (*equivocation_proof.clone(), key_owner_proof.clone()); - T::EquivocationReportSystem::check_evidence(evidence)?; - - let longevity = - >::Longevity::get(); - - ValidTransaction::with_tag_prefix("BeefyEquivocation") - // We assign the maximum priority for any equivocation report. - .priority(TransactionPriority::MAX) - // Only one equivocation report for the same offender at the same slot. - .and_provides(( - equivocation_proof.offender_id().clone(), - equivocation_proof.set_id(), - *equivocation_proof.round_number(), - )) - .longevity(longevity) - // We don't propagate this. This can never be included on a remote node. - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() + // discard equivocation report not coming from the local node + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, + _ => { + log::warn!( + target: LOG_TARGET, + "rejecting unsigned report equivocation transaction because it is not local/in-block." + ); + return InvalidTransaction::Call.into() + }, } + + let evidence = call.to_equivocation_evidence_for().ok_or(InvalidTransaction::Call)?; + let tag = (evidence.offender_id().clone(), evidence.set_id(), *evidence.round_number()); + T::EquivocationReportSystem::check_evidence(evidence)?; + + let longevity = + >::Longevity::get(); + ValidTransaction::with_tag_prefix("BeefyEquivocation") + // We assign the maximum priority for any equivocation report. + .priority(TransactionPriority::MAX) + // Only one equivocation report for the same offender at the same slot. + .and_provides(tag) + .longevity(longevity) + // We don't propagate this. This can never be included on a remote node. 
+ .propagate(false) + .build() } pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { - if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { - let evidence = (*equivocation_proof.clone(), key_owner_proof.clone()); - T::EquivocationReportSystem::check_evidence(evidence) - } else { - Err(InvalidTransaction::Call.into()) - } + let evidence = call.to_equivocation_evidence_for().ok_or(InvalidTransaction::Call)?; + T::EquivocationReportSystem::check_evidence(evidence) } } diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 63f3e9bb309c6adc452bb27827969de5e1713e89..a49f5d28f455a99560b9fc445f1a64c471aa54cc 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -28,7 +28,7 @@ use frame_support::{ }; use frame_system::{ ensure_none, ensure_signed, - pallet_prelude::{BlockNumberFor, OriginFor}, + pallet_prelude::{BlockNumberFor, HeaderFor, OriginFor}, }; use log; use sp_runtime::{ @@ -41,8 +41,9 @@ use sp_staking::{offence::OffenceReportSystem, SessionIndex}; use sp_std::prelude::*; use sp_consensus_beefy::{ - AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, OnNewValidatorSet, - ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, + AncestryHelper, AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, + ForkVotingProof, FutureBlockVotingProof, OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, + GENESIS_AUTHORITY_SET_ID, }; mod default_weights; @@ -98,6 +99,9 @@ pub mod pallet { /// weight MMR root over validators and make it available for Light Clients. type OnNewValidatorSet: OnNewValidatorSet<::BeefyId>; + /// Hook for checking commitment canonicity. + type AncestryHelper: AncestryHelper>; + /// Weights for this pallet. type WeightInfo: WeightInfo; @@ -188,8 +192,14 @@ pub mod pallet { pub enum Error { /// A key ownership proof provided as part of an equivocation report is invalid. InvalidKeyOwnershipProof, - /// An equivocation proof provided as part of an equivocation report is invalid. - InvalidEquivocationProof, + /// A double voting proof provided as part of an equivocation report is invalid. + InvalidDoubleVotingProof, + /// A fork voting proof provided as part of an equivocation report is invalid. + InvalidForkVotingProof, + /// A future block voting proof provided as part of an equivocation report is invalid. + InvalidFutureBlockVotingProof, + /// The session of the equivocation proof is invalid + InvalidEquivocationProofSession, /// A given equivocation report is valid but already previously reported. DuplicateOffenceReport, /// Submitted configuration is invalid. @@ -203,11 +213,11 @@ pub mod pallet { /// against the extracted offender. If both are valid, the offence /// will be reported. 
#[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_equivocation( + #[pallet::weight(T::WeightInfo::report_double_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] - pub fn report_equivocation( + pub fn report_double_voting( origin: OriginFor, equivocation_proof: Box< DoubleVotingProof< @@ -222,7 +232,7 @@ pub mod pallet { T::EquivocationReportSystem::process_evidence( Some(reporter), - (*equivocation_proof, key_owner_proof), + EquivocationEvidenceFor::DoubleVotingProof(*equivocation_proof, key_owner_proof), )?; // Waive the fee since the report is valid and beneficial Ok(Pays::No.into()) @@ -238,11 +248,11 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::report_equivocation( + #[pallet::weight(T::WeightInfo::report_double_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] - pub fn report_equivocation_unsigned( + pub fn report_double_voting_unsigned( origin: OriginFor, equivocation_proof: Box< DoubleVotingProof< @@ -257,7 +267,7 @@ pub mod pallet { T::EquivocationReportSystem::process_evidence( None, - (*equivocation_proof, key_owner_proof), + EquivocationEvidenceFor::DoubleVotingProof(*equivocation_proof, key_owner_proof), )?; Ok(Pays::No.into()) } @@ -278,6 +288,126 @@ pub mod pallet { GenesisBlock::::put(Some(genesis_block)); Ok(()) } + + /// Report fork voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_fork_voting( + origin: OriginFor, + equivocation_proof: Box< + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + >, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + let reporter = ensure_signed(origin)?; + + T::EquivocationReportSystem::process_evidence( + Some(reporter), + EquivocationEvidenceFor::ForkVotingProof(*equivocation_proof, key_owner_proof), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report fork voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only + /// block authors will call it (validated in `ValidateUnsigned`), as such + /// if the block author is defined it will be defined as the equivocation + /// reporter. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_fork_voting_unsigned( + origin: OriginFor, + equivocation_proof: Box< + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + >, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + T::EquivocationReportSystem::process_evidence( + None, + EquivocationEvidenceFor::ForkVotingProof(*equivocation_proof, key_owner_proof), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report future block voting equivocation. 
This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_future_block_voting( + origin: OriginFor, + equivocation_proof: Box, T::BeefyId>>, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + let reporter = ensure_signed(origin)?; + + T::EquivocationReportSystem::process_evidence( + Some(reporter), + EquivocationEvidenceFor::FutureBlockVotingProof( + *equivocation_proof, + key_owner_proof, + ), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report future block voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only + /// block authors will call it (validated in `ValidateUnsigned`), as such + /// if the block author is defined it will be defined as the equivocation + /// reporter. + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_future_block_voting_unsigned( + origin: OriginFor, + equivocation_proof: Box, T::BeefyId>>, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + T::EquivocationReportSystem::process_evidence( + None, + EquivocationEvidenceFor::FutureBlockVotingProof( + *equivocation_proof, + key_owner_proof, + ), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } } #[pallet::hooks] @@ -300,6 +430,48 @@ pub mod pallet { Self::validate_unsigned(source, call) } } + + impl Call { + pub fn to_equivocation_evidence_for(&self) -> Option> { + match self { + Call::report_double_voting_unsigned { equivocation_proof, key_owner_proof } => + Some(EquivocationEvidenceFor::::DoubleVotingProof( + *equivocation_proof.clone(), + key_owner_proof.clone(), + )), + Call::report_fork_voting_unsigned { equivocation_proof, key_owner_proof } => + Some(EquivocationEvidenceFor::::ForkVotingProof( + *equivocation_proof.clone(), + key_owner_proof.clone(), + )), + _ => None, + } + } + } + + impl From> for Call { + fn from(evidence: EquivocationEvidenceFor) -> Self { + match evidence { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, key_owner_proof) => + Call::report_double_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, key_owner_proof) => + Call::report_fork_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + EquivocationEvidenceFor::FutureBlockVotingProof( + equivocation_proof, + key_owner_proof, + ) => Call::report_future_block_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + } + } + } } #[cfg(any(feature = "try-runtime", test))] @@ -367,7 +539,7 @@ impl Pallet { /// Submits an extrinsic to report an equivocation. This method will create /// an unsigned extrinsic with a call to `report_equivocation_unsigned` and /// will push the transaction to the pool. Only useful in an offchain context. 
- pub fn submit_unsigned_equivocation_report( + pub fn submit_unsigned_double_voting_report( equivocation_proof: DoubleVotingProof< BlockNumberFor, T::BeefyId, @@ -375,7 +547,11 @@ impl Pallet { >, key_owner_proof: T::KeyOwnerProof, ) -> Option<()> { - T::EquivocationReportSystem::publish_evidence((equivocation_proof, key_owner_proof)).ok() + T::EquivocationReportSystem::publish_evidence(EquivocationEvidenceFor::DoubleVotingProof( + equivocation_proof, + key_owner_proof, + )) + .ok() } fn change_authorities( @@ -526,6 +702,20 @@ impl IsMember for Pallet { } pub trait WeightInfo { - fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; + fn report_voting_equivocation( + votes_count: u32, + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight; + fn report_double_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight { + Self::report_voting_equivocation(2, validator_count, max_nominators_per_validator) + } + fn report_fork_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight; + fn report_future_block_voting( + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight { + Self::report_voting_equivocation(1, validator_count, max_nominators_per_validator) + } fn set_new_genesis() -> Weight; } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 0b87de6bf5d79a3884d96f6f7cf79fc96ab3568b..03efccff7643003885dcb0ad0d6e67d77966f11f 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use std::vec; use frame_election_provider_support::{ @@ -28,8 +30,12 @@ use frame_support::{ use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_runtime::{ - app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, - traits::OpaqueKeys, BuildStorage, Perbill, + app_crypto::ecdsa::Public, + curve::PiecewiseLinear, + impl_opaque_keys, + testing::TestXt, + traits::{Header as HeaderT, OpaqueKeys}, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -37,6 +43,7 @@ use sp_state_machine::BasicExternalities; use crate as pallet_beefy; pub use sp_consensus_beefy::{ecdsa_crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; +use sp_consensus_beefy::{AncestryHelper, Commitment}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -75,11 +82,46 @@ where type Extrinsic = TestXt; } +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct MockAncestryProofContext { + pub is_valid: bool, +} + +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct MockAncestryProof { + pub is_non_canonical: bool, +} + parameter_types! { pub const Period: u64 = 1; pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); pub const MaxSetIdSessionEntries: u32 = BondingDuration::get() * SessionsPerEra::get(); + + pub storage AncestryProofContext: Option = Some( + MockAncestryProofContext { + is_valid: true, + } + ); +} + +pub struct MockAncestryHelper; + +impl AncestryHelper
<Header>
for MockAncestryHelper { + type Proof = MockAncestryProof; + type ValidationContext = MockAncestryProofContext; + + fn extract_validation_context(_header: Header) -> Option { + AncestryProofContext::get() + } + + fn is_non_canonical( + _commitment: &Commitment, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool { + context.is_valid && proof.is_non_canonical + } } impl pallet_beefy::Config for Test { @@ -88,6 +130,7 @@ impl pallet_beefy::Config for Test { type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type OnNewValidatorSet = (); + type AncestryHelper = MockAncestryHelper; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -120,20 +163,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_timestamp::Config for Test { @@ -171,35 +205,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 6a6aa245ce1f9051f7e310510c6110ce34f11bc9..a63b3532b6983402866728f558bf1d5cd1a4fc14 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -20,18 +20,22 @@ use std::vec; use frame_support::{ assert_err, assert_ok, - dispatch::{GetDispatchInfo, Pays}, + dispatch::{DispatchResultWithPostInfo, Pays}, traits::{Currency, KeyOwnerProofSystem, OnInitialize}, }; use sp_consensus_beefy::{ - check_equivocation_proof, + check_double_voting_proof, ecdsa_crypto, known_payloads::MMR_ROOT_ID, - test_utils::{generate_equivocation_proof, Keyring as BeefyKeyring}, - Payload, ValidatorSet, KEY_TYPE as BEEFY_KEY_TYPE, + test_utils::{ + generate_double_voting_proof, 
generate_fork_voting_proof, + generate_future_block_voting_proof, Keyring as BeefyKeyring, + }, + Payload, ValidatorSet, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE, }; use sp_runtime::DigestItem; +use sp_session::MembershipProof; -use crate::{self as beefy, mock::*, Call, Config, Error, Weight, WeightInfo}; +use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfo}; fn init_block(block: u64) { System::set_block_number(block); @@ -222,51 +226,90 @@ fn should_sign_and_verify() { // generate an equivocation proof, with two votes in the same round for // same payload signed by the same key - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (1, payload1.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in different rounds for // different payloads signed by the same key - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (2, payload2.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes by different authorities - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Alice), (1, payload2.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in different set ids - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (1, payload2.clone(), set_id + 1, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in the same round for // different payloads signed by the same key let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1, set_id, &BeefyKeyring::Bob), (1, payload2, set_id, &BeefyKeyring::Bob), ); // expect valid equivocation proof - assert!(check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); } -#[test] -fn report_equivocation_current_set_works() { +trait ReportEquivocationFn: + FnMut( + u64, + ValidatorSetId, + &BeefyKeyring, + MembershipProof, +) -> DispatchResultWithPostInfo +{ +} + +impl ReportEquivocationFn for F where + F: FnMut( + u64, + ValidatorSetId, + &BeefyKeyring, + MembershipProof, + ) -> DispatchResultWithPostInfo +{ +} + +fn report_double_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: 
&BeefyKeyring, + key_owner_proof: MembershipProof, +) -> DispatchResultWithPostInfo { + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + let equivocation_proof = generate_double_voting_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); + + Beefy::report_double_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +fn report_equivocation_current_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -297,24 +340,11 @@ fn report_equivocation_current_set_works() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof, with two votes in the same round for - // different payloads signed by the same key - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - // create the key ownership proof let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); + assert_ok!(f(block_num, set_id, &equivocation_keyring, key_owner_proof)); start_era(2); @@ -345,8 +375,7 @@ fn report_equivocation_current_set_works() { }); } -#[test] -fn report_equivocation_old_set_works() { +fn report_equivocation_old_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -384,20 +413,8 @@ fn report_equivocation_old_set_works() { let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the old set, - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, old_set_id, &equivocation_keyring), - (block_num, payload2, old_set_id, &equivocation_keyring), - ); - // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); + assert_ok!(f(block_num, old_set_id, &equivocation_keyring, key_owner_proof)); start_era(3); @@ -428,8 +445,7 @@ fn report_equivocation_old_set_works() { }); } -#[test] -fn report_equivocation_invalid_set_id() { +fn report_equivocation_invalid_set_id(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -446,28 +462,15 @@ fn report_equivocation_invalid_set_id() { let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation for a future set - let 
equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); - // the call for reporting the equivocation should error assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, + f(block_num, set_id + 1, &equivocation_keyring, key_owner_proof), + Error::::InvalidEquivocationProofSession, ); }); } -#[test] -fn report_equivocation_invalid_session() { +fn report_equivocation_invalid_session(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -488,29 +491,16 @@ fn report_equivocation_invalid_session() { let set_id = Beefy::validator_set().unwrap().id(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof at following era set id = 2 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - // report an equivocation for the current set using an key ownership // proof from the previous set, the session should be invalid. assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, + f(block_num, set_id + 1, &equivocation_keyring, key_owner_proof), + Error::::InvalidEquivocationProofSession, ); }); } -#[test] -fn report_equivocation_invalid_key_owner_proof() { +fn report_equivocation_invalid_key_owner_proof(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -532,14 +522,6 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the authority at index 0 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); - // we need to start a new era otherwise the key ownership proof won't be // checked since the authorities are part of the current session start_era(2); @@ -547,18 +529,81 @@ fn report_equivocation_invalid_key_owner_proof() { // report an equivocation for the current set using a key ownership // proof for a different key than the one in the equivocation proof. 
assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - invalid_key_owner_proof, - ), + f(block_num, set_id, &equivocation_keyring, invalid_key_owner_proof), Error::::InvalidKeyOwnershipProof, ); }); } +fn valid_equivocation_reports_dont_pay_fees(mut f: impl ReportEquivocationFn) { + let authorities = test_authorities(); + + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // create the key ownership proof. + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + // report the equivocation. + let post_info = + f(block_num, set_id, &equivocation_keyring, key_owner_proof.clone()).unwrap(); + + // the original weight should be kept, but given that the report + // is valid the fee is waived. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::No); + + // report the equivocation again which is invalid now since it is + // duplicate. + let post_info = f(block_num, set_id, &equivocation_keyring, key_owner_proof) + .err() + .unwrap() + .post_info; + + // the fee is not waived and the original weight is kept. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::Yes); + }) +} + +// Test double voting reporting logic. + #[test] -fn report_equivocation_invalid_equivocation_proof() { +fn report_double_voting_current_set_works() { + report_equivocation_current_set_works(report_double_voting); +} + +#[test] +fn report_double_voting_old_set_works() { + report_equivocation_old_set_works(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_session() { + report_equivocation_invalid_session(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_equivocation_proof() { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -578,12 +623,12 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( - Beefy::report_equivocation_unsigned( + Beefy::report_double_voting_unsigned( RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), - Error::::InvalidEquivocationProof, + Error::::InvalidDoubleVotingProof, ); }; @@ -594,31 +639,31 @@ fn report_equivocation_invalid_equivocation_proof() { // both votes target the same block number and payload, // there is no equivocation. - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &equivocation_keyring), )); // votes targeting different rounds, there is no equivocation. 
- assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num + 1, payload2.clone(), set_id, &equivocation_keyring), )); // votes signed with different authority keys - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &BeefyKeyring::Charlie), )); // votes signed with a key that isn't part of the authority set - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &BeefyKeyring::Dave), )); // votes targeting different set ids - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1, set_id, &equivocation_keyring), (block_num, payload2, set_id + 1, &equivocation_keyring), )); @@ -626,7 +671,7 @@ fn report_equivocation_invalid_equivocation_proof() { } #[test] -fn report_equivocation_validate_unsigned_prevents_duplicates() { +fn report_double_voting_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, ValidTransaction, @@ -649,14 +694,14 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (block_num, payload1, set_id, &equivocation_keyring), (block_num, payload2, set_id, &equivocation_keyring), ); let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - let call = Call::report_equivocation_unsigned { + let call = Call::report_double_voting_unsigned { equivocation_proof: Box::new(equivocation_proof.clone()), key_owner_proof: key_owner_proof.clone(), }; @@ -691,7 +736,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { assert_ok!(::pre_dispatch(&call)); // we submit the report - Beefy::report_equivocation_unsigned( + Beefy::report_double_voting_unsigned( RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, @@ -716,11 +761,11 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { } #[test] -fn report_equivocation_has_valid_weight() { +fn report_double_voting_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) + .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -728,20 +773,75 @@ fn report_equivocation_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. 
assert!((100..=1000) - .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) + .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); } #[test] -fn valid_equivocation_reports_dont_pay_fees() { +fn valid_double_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_double_voting) +} + +// Test fork voting reporting logic. + +fn report_fork_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: &BeefyKeyring, + key_owner_proof: MembershipProof, +) -> DispatchResultWithPostInfo { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload, set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); + + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +#[test] +fn report_fork_voting_current_set_works() { + report_equivocation_current_set_works(report_fork_voting); +} + +#[test] +fn report_fork_voting_old_set_works() { + report_equivocation_old_set_works(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_session() { + report_equivocation_invalid_session(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_equivocation_proof() { let authorities = test_authorities(); - ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { - start_era(1); + let mut ext = ExtBuilder::default().add_authorities(authorities).build(); + let mut era = 1; + let (block_num, set_id, equivocation_keyring, key_owner_proof) = ext.execute_with(|| { + start_era(era); let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); let authorities = validator_set.validators(); let set_id = validator_set.id(); @@ -750,56 +850,224 @@ fn valid_equivocation_reports_dont_pay_fees() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - // generate equivocation proof - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), + // generate a key ownership proof at set id in era 1 + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + era += 1; + start_era(era); + (block_num, set_id, equivocation_keyring, key_owner_proof) + }); + ext.persist_offchain_overlay(); + + ext.execute_with(|| { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + + // vote signed with a key that isn't part of the authority set + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload.clone(), set_id, &BeefyKeyring::Dave), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof.clone(), + ), + Error::::InvalidKeyOwnershipProof, ); - // 
create the key ownership proof. - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + // Simulate InvalidForkVotingProof error. + let equivocation_proof = generate_fork_voting_proof( + (block_num + 1, payload.clone(), set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: false }, + System::finalize(), + ); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof.clone(), + ), + Error::::InvalidForkVotingProof, + ); + }); +} - // check the dispatch info for the call. - let info = Call::::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof.clone()), - key_owner_proof: key_owner_proof.clone(), +#[test] +fn report_fork_voting_invalid_context() { + let authorities = test_authorities(); + + let mut ext = ExtBuilder::default().add_authorities(authorities).build(); + + let mut era = 1; + let block_num = ext.execute_with(|| { + assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(Session::current_index(), 0); + start_era(era); + + let block_num = System::block_number(); + era += 1; + start_era(era); + block_num + }); + ext.persist_offchain_overlay(); + + ext.execute_with(|| { + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + let validators = Session::validators(); + + // make sure that all validators have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(era, validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); } - .get_dispatch_info(); - // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); - assert_eq!(info.pays_fee, Pays::Yes); + assert_eq!(authorities.len(), 2); + let equivocation_authority_index = 1; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - // report the equivocation. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof.clone()), - key_owner_proof.clone(), - ) - .unwrap(); + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - // the original weight should be kept, but given that the report - // is valid the fee is waived. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::No); + // generate a fork equivocation proof, with a vote in the same round for a + // different payload than finalized + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload, set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); - // report the equivocation again which is invalid now since it is - // duplicate. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ) - .err() - .unwrap() - .post_info; + // create the key ownership proof + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - // the fee is not waived and the original weight is kept. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::Yes); - }) + // report an equivocation for the current set. 
Simulate a failure of + // `extract_validation_context` + AncestryProofContext::set(&None); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof.clone()), + key_owner_proof.clone(), + ), + Error::::InvalidForkVotingProof, + ); + + // report an equivocation for the current set. Simulate an invalid context. + AncestryProofContext::set(&Some(MockAncestryProofContext { is_valid: false })); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ), + Error::::InvalidForkVotingProof, + ); + }); +} + +#[test] +fn valid_fork_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_fork_voting) +} + +// Test future block voting reporting logic. + +fn report_future_block_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: &BeefyKeyring, + key_owner_proof: MembershipProof, +) -> DispatchResultWithPostInfo { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let equivocation_proof = generate_future_block_voting_proof(( + block_num + 100, + payload, + set_id, + &equivocation_keyring, + )); + + Beefy::report_future_block_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +#[test] +fn report_future_block_voting_current_set_works() { + report_equivocation_current_set_works(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_old_set_works() { + report_equivocation_old_set_works(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_session() { + report_equivocation_invalid_session(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_equivocation_proof() { + let authorities = test_authorities(); + + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { + start_era(1); + + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // create the key ownership proof + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + start_era(2); + + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + + // vote targeting old block + assert_err!( + Beefy::report_future_block_voting_unsigned( + RuntimeOrigin::none(), + Box::new(generate_future_block_voting_proof(( + 1, + payload.clone(), + set_id, + &equivocation_keyring, + ))), + key_owner_proof.clone(), + ), + Error::::InvalidFutureBlockVotingProof, + ); + }); +} + +#[test] +fn valid_future_block_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_future_block_voting) } #[test] diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index b5824ab2ec2eeaa2126bd72f6832e87736a8f914..a2c15951a596673edf2ab6d3485dbefb022e7acb 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -16,29 +16,29 @@ workspace = true targets = 
["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -linregress = { version = "0.5.1", optional = true } +codec = { workspace = true } +linregress = { optional = true, workspace = true } log = { workspace = true } -paste = "1.0" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +paste = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-support-procedural = { path = "../support/procedural", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-storage = { path = "../../primitives/storage", default-features = false } -static_assertions = "1.1.0" +frame-support = { workspace = true } +frame-support-procedural = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +static_assertions = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -rusty-fork = { version = "0.3.0", default-features = false } -sp-keystore = { path = "../../primitives/keystore" } +array-bytes = { workspace = true, default-features = true } +rusty-fork = { workspace = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index e4f3c272a63e4938aa2efd6fa30784543383c82a..4341f011fcbf9c323fbe9b5b7de7d1b86bc71c12 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -15,14 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "..", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } 
+sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index fac0054359060fd3c63741eaa1897c66990f5523..4b242a1a577ced6599a8142acc8d85f8af0f93e2 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 3558847c8fedd263e6c57db24195a898a3d0dd6d..f53d95d919d40c6a1b0f53ab3e1ca85443bd8419 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -177,7 +177,7 @@ benchmarks_instance_pallet! { Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; }: close_bounty(approve_origin, bounty_id) close_bounty_active { @@ -186,7 +186,7 @@ benchmarks_instance_pallet! { Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; }: close_bounty(approve_origin, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) @@ -231,5 +231,5 @@ benchmarks_instance_pallet! 
{ } } - impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) + impl_benchmark_test_suite!(Bounties, crate::tests::ExtBuilder::default().build(), crate::tests::Test) } diff --git a/substrate/frame/bounties/src/lib.rs index c930868bf10159326bae978553eadcbd3fe03128..c04bec2d12aea6bb38f94942c66965076058a5e8 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -245,6 +245,9 @@ pub mod pallet { /// The child bounty manager. type ChildBountyManager: ChildBountyManager>; + + /// Handler for the unbalanced decrease when slashing for a rejected bounty. + type OnSlash: OnUnbalanced>; } #[pallet::error] @@ -804,6 +807,54 @@ pub mod pallet { Ok(()) } } + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } +} + +#[cfg(any(feature = "try-runtime", test))] +impl, I: 'static> Pallet { + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_bounties_count()?; + + Ok(()) + } + + /// # Invariants + /// + /// * `BountyCount` should be greater than or equal to the number of items in + /// `Bounties`. + /// * `BountyCount` should be greater than or equal to the number of items in + /// `BountyDescriptions`. + /// * Number of items in `Bounties` should be the same as `BountyDescriptions` length. + fn try_state_bounties_count() -> Result<(), sp_runtime::TryRuntimeError> { + let bounties_length = Bounties::::iter().count() as u32; + + ensure!( + >::get() >= bounties_length, + "`BountyCount` must be greater than or equal to the number of `Bounties` in storage" + ); + + let bounties_description_length = BountyDescriptions::::iter().count() as u32; + ensure!( + >::get() >= bounties_description_length, + "`BountyCount` must be greater than or equal to the number of `BountyDescriptions` in storage." + ); + + ensure!( + bounties_length == bounties_description_length, + "Number of `Bounties` in storage must be the same as the number of `BountyDescriptions` in storage." + ); + Ok(()) + } } impl, I: 'static> Pallet { diff --git a/substrate/frame/bounties/src/tests.rs index a89f4ff9fbf301a8d03d9ad040a1b4009e7b18f6..7cd4798267450641eb2ec71da952ae8c7ed8bf27 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -66,23 +66,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub static Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -95,13 +83,8 @@ parameter_types!
{ impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -122,13 +105,8 @@ impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId2; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -167,6 +145,7 @@ impl Config for Test { type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); + type OnSlash = (); } impl Config for Test { @@ -182,23 +161,42 @@ impl Config for Test { type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); + type OnSlash = (); } type TreasuryError = pallet_treasury::Error; type TreasuryError1 = pallet_treasury::Error; -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { - system: frame_system::GenesisConfig::default(), - balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, - treasury: Default::default(), - treasury_1: Default::default(), +pub struct ExtBuilder {} + +impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } +} + +impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { + system: frame_system::GenesisConfig::default(), + balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, + treasury: Default::default(), + treasury_1: Default::default(), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Bounties::do_try_state().expect("All invariants must hold after a test"); + Bounties1::do_try_state().expect("All invariants must hold after a test"); + }) } - .build_storage() - .unwrap() - .into(); - ext.execute_with(|| System::set_block_number(1)); - ext } fn last_event() -> BountiesEvent { @@ -212,7 +210,7 @@ fn last_event() -> BountiesEvent { #[test] fn genesis_config_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Treasury::pot(), 0); assert_eq!(Treasury::proposal_count(), 0); }); @@ -220,63 +218,19 @@ fn genesis_config_works() { #[test] fn minting_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Check that accumulate works when we have Some value in Dummy already. 
Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); }); } -#[test] -fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - TreasuryError::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -286,7 +240,7 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { #[test] fn unused_pot_should_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let init_total_issuance = Balances::total_issuance(); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::total_issuance(), init_total_issuance + 100); @@ -297,112 +251,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - pallet_treasury::Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -412,18 +267,11 @@ fn accepted_spend_proposal_enacted_on_spend_period() { #[test] fn pot_underflow_should_not_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 150, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -439,31 +287,17 @@ fn pot_underflow_should_not_diminish() { // i.e. pot should not include existential deposit needed for account survival. 
#[test] fn treasury_account_doesnt_get_deleted() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), treasury_balance, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), Treasury::pot(), 3) }); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -486,22 +320,8 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 99, 3) }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 1, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -519,7 +339,7 @@ fn inexistent_account_works() { #[test] fn propose_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -556,7 +376,7 @@ fn propose_bounty_works() { #[test] fn propose_bounty_validation_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -585,7 +405,7 @@ fn propose_bounty_validation_works() { #[test] fn close_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!(Bounties::close_bounty(RuntimeOrigin::root(), 0), Error::::InvalidIndex); @@ -610,7 +430,7 @@ fn close_bounty_works() { #[test] fn approve_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( @@ -671,7 +491,7 @@ fn approve_bounty_works() { #[test] fn assign_curator_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -741,7 +561,7 @@ fn assign_curator_works() { #[test] fn unassign_curator_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); 
Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -794,7 +614,7 @@ fn unassign_curator_works() { #[test] fn award_and_claim_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); @@ -861,7 +681,7 @@ fn award_and_claim_bounty_works() { #[test] fn claim_handles_high_fee() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 30); @@ -902,7 +722,7 @@ fn claim_handles_high_fee() { #[test] fn cancel_and_refund() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -945,7 +765,7 @@ fn cancel_and_refund() { #[test] fn award_and_cancel() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -988,7 +808,7 @@ fn award_and_cancel() { #[test] fn expire_and_unassign() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -1036,7 +856,7 @@ fn expire_and_unassign() { #[test] fn extend_expiry() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); @@ -1172,7 +992,7 @@ fn genesis_funding_works() { #[test] fn unassign_curator_self() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -1213,7 +1033,7 @@ fn unassign_curator_self() { fn accept_curator_handles_different_deposit_calculations() { // This test will verify that a bounty with and without a fee results // in a different curator deposit: one using the value, and one using the fee. - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Case 1: With a fee let user = 1; let bounty_index = 0; @@ -1290,7 +1110,7 @@ fn accept_curator_handles_different_deposit_calculations() { #[test] fn approve_bounty_works_second_instance() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Set burn to 0 to make tracking funds easier. 
Burn::set(Permill::from_percent(0)); @@ -1316,7 +1136,7 @@ fn approve_bounty_works_second_instance() { #[test] fn approve_bounty_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -1334,7 +1154,7 @@ fn approve_bounty_insufficient_spend_limit_errors() { #[test] fn approve_bounty_instance1_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury1::account_id(), 101); @@ -1352,7 +1172,7 @@ fn approve_bounty_instance1_insufficient_spend_limit_errors() { #[test] fn propose_curator_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -1375,7 +1195,7 @@ fn propose_curator_insufficient_spend_limit_errors() { #[test] fn propose_curator_instance1_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 8a84fbfdfb701c87cde1dfb935ad1dcb6cf56e04..77c4883e4a433ffb0e0fb8c376b23bf109948769 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -16,22 +16,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -bitvec = { version = "1.0.0", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +bitvec = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-io = { path = "../../primitives/io" } -sp-tracing = { path = "../../primitives/tracing" } -pretty_assertions = "1.3.0" +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 9cb5ad096c83b74e50a77993c2c3e0049ec2c876..3c9ea4cdba4e74da1df2fe343bc8412da2fe5466 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ 
b/substrate/frame/broker/src/benchmarking.rs @@ -99,7 +99,7 @@ fn setup_and_start_sale() -> Result { // Assume Leases to be filled for worst case setup_leases::(T::MaxLeasedCores::get(), 1, 10); - Broker::::do_start_sales(10u32.into(), MAX_CORE_COUNT.into()) + Broker::::do_start_sales(10_000_000u32.into(), MAX_CORE_COUNT.into()) .map_err(|_| BenchmarkError::Weightless)?; Ok(T::MaxReservedCores::get() @@ -201,7 +201,7 @@ mod benches { let latest_region_begin = Broker::::latest_timeslice_ready_to_commit(&config); - let initial_price = 10u32.into(); + let initial_price = 10_000_000u32.into(); let origin = T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -214,8 +214,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 1000u32.into(), - end_price: 10u32.into(), + start_price: 1_000_000_000u32.into(), + end_price: 10_000_000u32.into(), region_begin: latest_region_begin + config.region_length, region_end: latest_region_begin + config.region_length * 2, ideal_cores_sold: 0, @@ -240,13 +240,13 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 10u32.into()); + _(RawOrigin::Signed(caller.clone()), 10_000_000u32.into()); - assert_eq!(SaleInfo::::get().unwrap().sellout_price, Some(10u32.into())); + assert_eq!(SaleInfo::::get().unwrap().sellout_price, Some(10_000_000u32.into())); assert_last_event::( Event::Purchased { who: caller, @@ -255,7 +255,7 @@ mod benches { core, mask: CoreMask::complete(), }, - price: 10u32.into(), + price: 10_000_000u32.into(), duration: 3u32.into(), } .into(), @@ -274,10 +274,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(20u32.into()), + T::Currency::minimum_balance().saturating_add(20_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, 1001, Final) @@ -303,10 +303,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -336,10 +336,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -368,10 +368,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + 
T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -404,10 +404,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -439,10 +439,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -475,14 +475,14 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); T::Currency::set_balance( &Broker::::account_id(), - T::Currency::minimum_balance().saturating_add(200u32.into()), + T::Currency::minimum_balance().saturating_add(200_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -491,7 +491,7 @@ mod benches { Broker::::do_pool(region, None, recipient.clone(), Final) .map_err(|_| BenchmarkError::Weightless)?; - let revenue = 10u32.into(); + let revenue = 10_000_000u32.into(); InstaPoolHistory::::insert( region.begin, InstaPoolHistoryRecord { @@ -508,7 +508,7 @@ mod benches { assert_last_event::( Event::RevenueClaimPaid { who: recipient, - amount: 200u32.into(), + amount: 200_000_000u32.into(), next: if m < new_config_record::().region_length { Some(RegionId { begin: region.begin.saturating_add(m), @@ -534,11 +534,11 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(30u32.into()), + T::Currency::minimum_balance().saturating_add(30_000_000u32.into()), ); T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -549,10 +549,11 @@ mod benches { let beneficiary: RelayAccountIdOf = account("beneficiary", 0, SEED); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 20u32.into(), beneficiary.clone()); + _(RawOrigin::Signed(caller.clone()), 20_000_000u32.into(), beneficiary.clone()); assert_last_event::( - Event::CreditPurchased { who: caller, beneficiary, amount: 20u32.into() }.into(), + Event::CreditPurchased { who: caller, beneficiary, amount: 20_000_000u32.into() } + 
.into(), ); Ok(()) @@ -568,10 +569,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; advance_to::( @@ -602,10 +603,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -634,7 +635,7 @@ mod benches { fn drop_history() -> Result<(), BenchmarkError> { setup_and_start_sale::()?; let when = 5u32.into(); - let revenue = 10u32.into(); + let revenue = 10_000_000u32.into(); let region_len = Configuration::::get().unwrap().region_length; advance_to::( @@ -672,7 +673,7 @@ mod benches { let id = PotentialRenewalId { core, when }; let record = PotentialRenewalRecord { - price: 1u32.into(), + price: 1_000_000u32.into(), completion: CompletionStatus::Complete(new_schedule()), }; PotentialRenewals::::insert(id, record); @@ -732,23 +733,27 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(30u32.into()), + T::Currency::minimum_balance().saturating_add(30_000_000u32.into()), + ); + T::Currency::set_balance( + &Broker::::account_id(), + T::Currency::minimum_balance().saturating_add(90_000_000u32.into()), ); - T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); let multiplicator = 5; - ::ensure_notify_revenue_info( - (timeslice_period * multiplicator).into(), - 10u32.into(), - ); + + RevenueInbox::::put(OnDemandRevenueRecord { + until: (timeslice_period * multiplicator).into(), + amount: 10_000_000u32.into(), + }); let timeslice = multiplicator - 1; InstaPoolHistory::::insert( timeslice, InstaPoolHistoryRecord { - private_contributions: 1u32.into(), - system_contributions: 9u32.into(), + private_contributions: 4u32.into(), + system_contributions: 6u32.into(), maybe_payout: None, }, ); @@ -761,8 +766,8 @@ mod benches { assert_last_event::( Event::ClaimsReady { when: timeslice.into(), - system_payout: 9u32.into(), - private_payout: 1u32.into(), + system_payout: 6_000_000u32.into(), + private_payout: 4_000_000u32.into(), } .into(), ); @@ -776,7 +781,7 @@ mod benches { let config = new_config_record::(); let now = frame_system::Pallet::::block_number(); - let end_price = 10u32.into(); + let end_price = 10_000_000u32.into(); let commit_timeslice = Broker::::latest_timeslice_ready_to_commit(&config); let sale = SaleInfoRecordOf:: { sale_start: now, @@ -815,8 +820,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 1000u32.into(), - end_price: 10u32.into(), + start_price: 1_000_000_000u32.into(), + end_price: 10_000_000u32.into(), region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + 
config.region_length, ideal_cores_sold: 0, @@ -889,6 +894,7 @@ mod benches { T::Coretime::request_revenue_info_at(rc_block); } } + #[benchmark] fn notify_core_count() -> Result<(), BenchmarkError> { let admin_origin = @@ -901,6 +907,21 @@ mod benches { Ok(()) } + #[benchmark] + fn notify_revenue() -> Result<(), BenchmarkError> { + let admin_origin = + T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _( + admin_origin as T::RuntimeOrigin, + OnDemandRevenueRecord { until: 100u32.into(), amount: 100_000_000u32.into() }, + ); + + assert!(RevenueInbox::::take().is_some()); + Ok(()) + } + #[benchmark] fn do_tick_base() -> Result<(), BenchmarkError> { setup_and_start_sale::()?; @@ -939,6 +960,31 @@ mod benches { Ok(()) } + #[benchmark] + fn on_new_timeslice() -> Result<(), BenchmarkError> { + setup_and_start_sale::()?; + + advance_to::(2); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::set_balance( + &caller.clone(), + T::Currency::minimum_balance().saturating_add(u32::MAX.into()), + ); + + let _region = Broker::::do_purchase(caller.clone(), (u32::MAX / 2).into()) + .map_err(|_| BenchmarkError::Weightless)?; + + let timeslice = Broker::::current_timeslice(); + + #[block] + { + T::Coretime::on_new_timeslice(timeslice); + } + + Ok(()) + } + // Implements a test for each benchmark. Execute with: // `cargo test -p pallet-broker --features runtime-benchmarks`. impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/broker/src/coretime_interface.rs b/substrate/frame/broker/src/coretime_interface.rs index 58efa7fa92bb0fa6c29a45375593f2246f5eaa8a..f92f8b22091ac992180fd3f44d93cffddd59061d 100644 --- a/substrate/frame/broker/src/coretime_interface.rs +++ b/substrate/frame/broker/src/coretime_interface.rs @@ -23,7 +23,9 @@ use scale_info::TypeInfo; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_core::RuntimeDebug; use sp_runtime::traits::BlockNumberProvider; -use sp_std::vec::Vec; +use sp_std::{fmt::Debug, vec::Vec}; + +use crate::Timeslice; /// Index of a Polkadot Core. pub type CoreIndex = u16; @@ -62,7 +64,7 @@ pub trait CoretimeInterface { type AccountId: Parameter; /// A (Relay-chain-side) balance. - type Balance: AtLeast32BitUnsigned; + type Balance: AtLeast32BitUnsigned + Encode + Decode + MaxEncodedLen + TypeInfo + Debug; /// A provider for the relay chain block number. type RelayChainBlockNumberProvider: BlockNumberProvider; @@ -107,22 +109,10 @@ pub trait CoretimeInterface { end_hint: Option>, ); - /// Provide the amount of revenue accumulated from Instantaneous Coretime Sales from Relay-chain - /// block number `last_until` to `until`, not including `until` itself. `last_until` is defined - /// as being the `until` argument of the last `notify_revenue` message sent, or zero for the - /// first call. If `revenue` is `None`, this indicates that the information is no longer - /// available. - /// - /// This explicitly disregards the possibility of multiple parachains requesting and being - /// notified of revenue information. The Relay-chain must be configured to ensure that only a - /// single revenue information destination exists. - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)>; - - /// Ensure that revenue information is updated to the provided value. - /// - /// This is only used for benchmarking. 
- #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance); + /// A hook that is called right after a new timeslice has begun. Likely to be used for + /// batching different matters that happened during the timeslice and may benefit from batched + /// processing. + fn on_new_timeslice(_timeslice: Timeslice) {} } impl CoretimeInterface for () { @@ -140,9 +130,4 @@ impl CoretimeInterface for () { _end_hint: Option>, ) { } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - None - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(_when: RCBlockNumberOf, _revenue: Self::Balance) {} } diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 79c1a1f79796330a7377f06ef5f1a62045eb2848..9e7a56e52812e578d12c709a4dafb85dcc0d041a 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -16,6 +16,7 @@ // limitations under the License. use super::*; +use coretime_interface::CoretimeInterface; use frame_support::{ pallet_prelude::{DispatchResult, *}, traits::{fungible::Mutate, tokens::Preservation::Expendable, DefensiveResult}, @@ -458,6 +459,11 @@ impl Pallet { Ok(()) } + pub(crate) fn do_notify_revenue(revenue: OnDemandRevenueRecordOf) -> DispatchResult { + RevenueInbox::::put(revenue); + Ok(()) + } + pub(crate) fn do_swap_leases(id: TaskId, other: TaskId) -> DispatchResult { let mut id_leases_count = 0; let mut other_leases_count = 0; @@ -472,7 +478,6 @@ impl Pallet { } }) }); - Ok(()) } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 0774c02e1cf10e7360c89dfc6c1dc11f28b563ac..4cb380bae91d87e1ec32a96d88cfaa1b5fcd97a4 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -57,7 +57,7 @@ pub mod pallet { pallet_prelude::{DispatchResult, DispatchResultWithPostInfo, *}, traits::{ fungible::{Balanced, Credit, Mutate}, - EnsureOrigin, OnUnbalanced, + BuildGenesisConfig, EnsureOrigin, OnUnbalanced, }, PalletId, }; @@ -65,7 +65,7 @@ pub mod pallet { use sp_runtime::traits::{Convert, ConvertBack}; use sp_std::vec::Vec; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -174,6 +174,10 @@ pub mod pallet { #[pallet::storage] pub type CoreCountInbox = StorageValue<_, CoreIndex, OptionQuery>; + /// Received revenue info from the relay chain.
+ #[pallet::storage] + pub type RevenueInbox = StorageValue<_, OnDemandRevenueRecordOf, OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -488,6 +492,20 @@ pub mod pallet { NoClaimTimeslices, } + #[derive(frame_support::DefaultNoBound)] + #[pallet::genesis_config] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + frame_system::Pallet::::inc_providers(&Pallet::::account_id()); + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { @@ -804,6 +822,17 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(20)] + #[pallet::weight(T::WeightInfo::notify_revenue())] + pub fn notify_revenue( + origin: OriginFor, + revenue: OnDemandRevenueRecordOf, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_notify_revenue(revenue)?; + Ok(()) + } + #[pallet::call_index(99)] #[pallet::weight(T::WeightInfo::swap_leases())] pub fn swap_leases(origin: OriginFor, id: TaskId, other: TaskId) -> DispatchResult { diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs index f354e447fe84eba4df56d02e82c69519ba67cf7d..917f44a134cc17bb61ce6dfed0206a505a0f2d21 100644 --- a/substrate/frame/broker/src/migration.rs +++ b/substrate/frame/broker/src/migration.rs @@ -128,6 +128,36 @@ mod v2 { } } +mod v3 { + use super::*; + use frame_system::Pallet as System; + + pub struct MigrateToV3Impl(PhantomData); + + impl UncheckedOnRuntimeUpgrade for MigrateToV3Impl { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let acc = Pallet::::account_id(); + System::::inc_providers(&acc); + // calculate and return migration weights + T::DbWeight::get().writes(1) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok(System::::providers(&Pallet::::account_id()).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let old_providers = u32::decode(&mut &state[..]).expect("Known good"); + let new_providers = System::::providers(&Pallet::::account_id()) as u32; + + ensure!(new_providers == old_providers + 1, "Providers count should increase by one"); + Ok(()) + } + } +} + /// Migrate the pallet storage from `0` to `1`. pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< 0, @@ -144,3 +174,11 @@ pub type MigrateV1ToV2 = frame_support::migrations::VersionedMigration< Pallet, ::DbWeight, >; + +pub type MigrateV2ToV3 = frame_support::migrations::VersionedMigration< + 2, + 3, + v3::MigrateToV3Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index 6fff6aa10080c5a888ba35b4c3aeb1d180f9361b..f29651abff7b814791c446c29994e7617c619da1 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -70,7 +70,6 @@ parameter_types! 
{ pub static CoretimeWorkplan: BTreeMap<(u32, CoreIndex), Vec<(CoreAssignment, PartsOf57600)>> = Default::default(); pub static CoretimeUsage: BTreeMap> = Default::default(); pub static CoretimeInPool: CoreMaskBitCount = 0; - pub static NotifyRevenueInfo: Vec<(u32, u64)> = Default::default(); } pub struct TestCoretimeProvider; @@ -90,11 +89,10 @@ impl CoretimeInterface for TestCoretimeProvider { ); } - let when = when as u32; let mut total = 0; CoretimeSpending::mutate(|s| { s.retain(|(n, a)| { - if *n < when { + if *n < when as u32 { total += a; false } else { @@ -102,7 +100,8 @@ impl CoretimeInterface for TestCoretimeProvider { } }) }); - NotifyRevenueInfo::mutate(|s| s.insert(0, (when, total))); + mint_to_pot(total); + RevenueInbox::::put(OnDemandRevenueRecord { until: when, amount: total }); } fn credit_account(who: Self::AccountId, amount: Self::Balance) { CoretimeCredit::mutate(|c| c.entry(who).or_default().saturating_accrue(amount)); @@ -125,19 +124,13 @@ impl CoretimeInterface for TestCoretimeProvider { ); CoretimeTrace::mutate(|v| v.push(item)); } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - NotifyRevenueInfo::mutate(|s| s.pop()).map(|v| (v.0 as _, v.1)) - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { - NotifyRevenueInfo::mutate(|s| s.push((when as u32, revenue))); - } } + impl TestCoretimeProvider { - pub fn spend_instantaneous(who: u64, price: u64) -> Result<(), ()> { - let mut c = CoretimeCredit::get(); + pub fn spend_instantaneous(_who: u64, price: u64) -> Result<(), ()> { + let c = CoretimeCredit::get(); ensure!(CoretimeInPool::get() > 0, ()); - c.insert(who, c.get(&who).ok_or(())?.checked_sub(price).ok_or(())?); + // c.insert(who, c.get(&who).ok_or(())?.checked_sub(price).ok_or(())?); CoretimeCredit::set(c); CoretimeSpending::mutate(|v| { v.push((RCBlockNumberProviderOf::::current_block_number() as u32, price)) @@ -223,6 +216,11 @@ pub fn pot() -> u64 { balance(Broker::account_id()) } +pub fn mint_to_pot(amount: u64) { + let imb = ::Currency::issue(amount); + let _ = ::Currency::resolve(&Broker::account_id(), imb); +} + pub fn revenue() -> u64 { balance(0) } diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index e953afd6dc3c8818338d810d0c9d9d37c0b742af..2a8ea24b447ada3660ee9e5852dcf3c9e469f0db 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -451,6 +451,8 @@ fn renewals_affect_price() { #[test] fn instapool_payouts_work() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); @@ -458,11 +460,13 @@ fn instapool_payouts_work() { advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); assert_ok!(Broker::do_pool(region, None, 2, Final)); - assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 20, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); advance_to(11); - assert_eq!(pot(), 14); + // Should get revenue amount 10 from RC, from which 6 is system payout (goes to account0 + // instantly) and the rest is private (kept in the pot until claimed) + assert_eq!(pot(), 4); assert_eq!(revenue(), 106); // Cannot claim 
for 0 timeslices. @@ -470,13 +474,15 @@ fn instapool_payouts_work() { // Revenue can be claimed. assert_ok!(Broker::do_claim_revenue(region, 100)); - assert_eq!(pot(), 10); + assert_eq!(pot(), 0); assert_eq!(balance(2), 4); }); } #[test] fn instapool_partial_core_payouts_work() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); @@ -487,7 +493,7 @@ fn instapool_partial_core_payouts_work() { Broker::do_interlace(region, None, CoreMask::from_chunk(0, 20)).unwrap(); assert_ok!(Broker::do_pool(region1, None, 2, Final)); assert_ok!(Broker::do_pool(region2, None, 3, Final)); - assert_ok!(Broker::do_purchase_credit(1, 40, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 40, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 40)); advance_to(11); @@ -502,6 +508,8 @@ fn instapool_partial_core_payouts_work() { #[test] fn instapool_core_payouts_work_with_partitioned_region() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { assert_ok!(Broker::do_start_sales(100, 1)); advance_to(2); @@ -514,14 +522,14 @@ fn instapool_core_payouts_work_with_partitioned_region() { // coretime will be purchased from `region2`. assert_ok!(Broker::do_pool(region1, None, 2, Final)); assert_ok!(Broker::do_pool(region2, None, 3, Final)); - assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 20, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); advance_to(11); - assert_eq!(pot(), 20); + assert_eq!(pot(), 10); assert_eq!(revenue(), 100); assert_ok!(Broker::do_claim_revenue(region1, 100)); - assert_eq!(pot(), 10); + assert_eq!(pot(), 0); assert_eq!(balance(2), 10); advance_to(12); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 20637cf7b903cf73fb5d9bfe08e70013303bcb1a..3292faa4eb7e398bd603de6fa1108a1e5470f588 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use super::*; -use frame_support::{pallet_prelude::*, weights::WeightMeter}; +use frame_support::{pallet_prelude::*, traits::defensive_prelude::*, weights::WeightMeter}; use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero}; use sp_runtime::traits::ConvertBack; use sp_std::{vec, vec::Vec}; @@ -76,6 +76,8 @@ impl Pallet { let rc_block = T::TimeslicePeriod::get() * status.last_timeslice.into(); T::Coretime::request_revenue_info_at(rc_block); meter.consume(T::WeightInfo::request_revenue_info_at()); + T::Coretime::on_new_timeslice(status.last_timeslice); + meter.consume(T::WeightInfo::on_new_timeslice()); } Status::::put(&status); @@ -93,15 +95,23 @@ } pub(crate) fn process_revenue() -> bool { - let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { return false }; + let Some(OnDemandRevenueRecord { until, amount }) = RevenueInbox::::take() else { + return false + }; let when: Timeslice = (until / T::TimeslicePeriod::get()).saturating_sub(One::one()).saturated_into(); - let mut revenue = T::ConvertBalance::convert_back(amount); + let mut revenue = T::ConvertBalance::convert_back(amount.clone()); if revenue.is_zero() { Self::deposit_event(Event::::HistoryDropped { when, revenue }); InstaPoolHistory::::remove(when); return true } + + log::debug!( + target: "pallet_broker::process_revenue", + "Received {amount:?} from RC, converted into {revenue:?} revenue", + ); + let mut r = InstaPoolHistory::::get(when).unwrap_or_default(); if r.maybe_payout.is_some() { Self::deposit_event(Event::::HistoryIgnored { when, revenue }); @@ -112,7 +122,7 @@ let system_payout = if !total_contrib.is_zero() { let system_payout = revenue.saturating_mul(r.system_contributions.into()) / total_contrib.into(); - let _ = Self::charge(&Self::account_id(), system_payout); + Self::charge(&Self::account_id(), system_payout).defensive_ok(); revenue.saturating_reduce(system_payout); system_payout @@ -120,6 +130,11 @@ Zero::zero() }; + log::debug!( + target: "pallet_broker::process_revenue", + "Charged {system_payout:?} for system payouts, {revenue:?} remaining for private contributions", + ); + if !revenue.is_zero() && r.private_contributions > 0 { r.maybe_payout = Some(revenue); InstaPoolHistory::::insert(when, &r); diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 885cac9a5c23d47add53ce4e2eff02da49d3cdc9..dcfa9a77e4f3a4f8f9d21375e860a537b76f9218 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -255,6 +255,21 @@ pub struct LeaseRecordItem { pub type LeasesRecord = BoundedVec; pub type LeasesRecordOf = LeasesRecord<::MaxLeasedCores>; +/// Record for on-demand core sales. +/// +/// `until` is the relay chain block height up to which the original request +/// for revenue was made. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct OnDemandRevenueRecord { + /// The height of the Relay-chain at the time the revenue request was made. + pub until: RelayBlockNumber, + /// The accumulated balance of on-demand sales made on the relay chain. + pub amount: RelayBalance, +} + +pub type OnDemandRevenueRecordOf = + OnDemandRevenueRecord, RelayBalanceOf>; + /// Configuration of this pallet.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ConfigRecord { diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index d9d9d348e47e9db76731b41fd65086c689fc86f0..4889c2577ddd80d9eac3652dd239271978416beb 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,8 +75,10 @@ pub trait WeightInfo { fn process_core_schedule() -> Weight; fn request_revenue_info_at() -> Weight; fn notify_core_count() -> Weight; + fn notify_revenue() -> Weight; fn do_tick_base() -> Weight; fn swap_leases() -> Weight; + fn on_new_timeslice() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -88,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_945_000 picoseconds. - Weight::from_parts(2_142_000, 0) + // Minimum execution time: 1_977_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -98,8 +100,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 16_274_000 picoseconds. - Weight::from_parts(16_828_000, 7496) + // Minimum execution time: 16_880_000 picoseconds. + Weight::from_parts(17_506_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 15_080_000 picoseconds. - Weight::from_parts(15_874_000, 7496) + // Minimum execution time: 15_569_000 picoseconds. + Weight::from_parts(16_123_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -120,8 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 8_761_000 picoseconds. - Weight::from_parts(9_203_000, 1526) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_389_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -144,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 26_057_000 picoseconds. - Weight::from_parts(46_673_357, 8499) - // Standard Error: 456 - .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) + // Minimum execution time: 27_119_000 picoseconds. 
+ Weight::from_parts(47_930_900, 8499) + // Standard Error: 464 + .saturating_add(Weight::from_parts(2_940, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -165,8 +167,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `651` // Estimated: `2136` - // Minimum execution time: 40_907_000 picoseconds. - Weight::from_parts(42_566_000, 2136) + // Minimum execution time: 42_429_000 picoseconds. + Weight::from_parts(43_538_000, 2136) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -188,8 +190,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `769` // Estimated: `4698` - // Minimum execution time: 65_209_000 picoseconds. - Weight::from_parts(68_604_000, 4698) + // Minimum execution time: 62_957_000 picoseconds. + Weight::from_parts(66_821_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -199,8 +201,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 15_860_000 picoseconds. - Weight::from_parts(16_393_000, 3551) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(16_775_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -210,8 +212,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 17_651_000 picoseconds. - Weight::from_parts(18_088_000, 3551) + // Minimum execution time: 17_720_000 picoseconds. + Weight::from_parts(18_916_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -221,8 +223,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 18_576_000 picoseconds. - Weight::from_parts(19_810_000, 3551) + // Minimum execution time: 19_088_000 picoseconds. + Weight::from_parts(19_732_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -238,8 +240,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 31_015_000 picoseconds. - Weight::from_parts(31_932_000, 4681) + // Minimum execution time: 30_522_000 picoseconds. + Weight::from_parts(31_573_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -257,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 36_473_000 picoseconds. - Weight::from_parts(37_382_000, 5996) + // Minimum execution time: 35_833_000 picoseconds. + Weight::from_parts(36_830_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -273,10 +275,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 64_957_000 picoseconds. - Weight::from_parts(66_024_232, 6196) - // Standard Error: 50_170 - .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) + // Minimum execution time: 65_882_000 picoseconds. 
+ Weight::from_parts(67_506_904, 6196) + // Standard Error: 49_386 + .saturating_add(Weight::from_parts(1_197_959, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -288,8 +290,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 39_939_000 picoseconds. - Weight::from_parts(40_788_000, 3593) + // Minimum execution time: 41_860_000 picoseconds. + Weight::from_parts(42_478_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -301,8 +303,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(37_559_000, 3551) + // Minimum execution time: 32_593_000 picoseconds. + Weight::from_parts(35_399_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -316,8 +318,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 42_895_000 picoseconds. - Weight::from_parts(53_945_000, 3533) + // Minimum execution time: 41_934_000 picoseconds. + Weight::from_parts(50_480_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -333,8 +335,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 50_770_000 picoseconds. - Weight::from_parts(63_117_000, 3593) + // Minimum execution time: 47_167_000 picoseconds. + Weight::from_parts(54_289_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -346,18 +348,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 33_396_000 picoseconds. - Weight::from_parts(36_247_000, 4698) + // Minimum execution time: 29_755_000 picoseconds. + Weight::from_parts(32_857_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_625_000 picoseconds. - Weight::from_parts(4_011_396, 0) + // Minimum execution time: 3_793_000 picoseconds. + Weight::from_parts(4_086_907, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(60, 0).saturating_mul(n.into())) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -366,13 +370,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_217_000 picoseconds. - Weight::from_parts(6_608_394, 1487) + // Minimum execution time: 6_262_000 picoseconds. 
+ Weight::from_parts(6_734_896, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -383,10 +387,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `972` - // Estimated: `4437` - // Minimum execution time: 46_853_000 picoseconds. - Weight::from_parts(47_740_000, 4437) + // Measured: `829` + // Estimated: `3593` + // Minimum execution time: 39_812_000 picoseconds. + Weight::from_parts(41_227_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -405,8 +409,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 34_240_000 picoseconds. - Weight::from_parts(35_910_175, 8499) + // Minimum execution time: 34_576_000 picoseconds. + Weight::from_parts(36_303_629, 8499) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -418,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_083_000 picoseconds. - Weight::from_parts(7_336_000, 3493) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_206_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -431,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 15_029_000 picoseconds. - Weight::from_parts(15_567_000, 4681) + // Minimum execution time: 15_063_000 picoseconds. + Weight::from_parts(15_463_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -440,8 +444,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 123_000 picoseconds. - Weight::from_parts(136_000, 0) + // Minimum execution time: 126_000 picoseconds. + Weight::from_parts(157_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -449,8 +453,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_775_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 1_803_000 picoseconds. 
+ Weight::from_parts(1_965_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -459,16 +473,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 11_859_000 picoseconds. - Weight::from_parts(12_214_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(9_699_000, 1516) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -476,11 +490,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 5_864_000 picoseconds. - Weight::from_parts(6_231_000, 1526) + // Minimum execution time: 5_984_000 picoseconds. + Weight::from_parts(6_296_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(268_000, 0) + } } // For backwards compatibility and tests. @@ -491,8 +512,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_945_000 picoseconds. - Weight::from_parts(2_142_000, 0) + // Minimum execution time: 1_977_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -501,8 +522,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 16_274_000 picoseconds. - Weight::from_parts(16_828_000, 7496) + // Minimum execution time: 16_880_000 picoseconds. + Weight::from_parts(17_506_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -512,8 +533,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 15_080_000 picoseconds. - Weight::from_parts(15_874_000, 7496) + // Minimum execution time: 15_569_000 picoseconds. 
+ Weight::from_parts(16_123_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -523,8 +544,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 8_761_000 picoseconds. - Weight::from_parts(9_203_000, 1526) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_389_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -547,10 +568,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 26_057_000 picoseconds. - Weight::from_parts(46_673_357, 8499) - // Standard Error: 456 - .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) + // Minimum execution time: 27_119_000 picoseconds. + Weight::from_parts(47_930_900, 8499) + // Standard Error: 464 + .saturating_add(Weight::from_parts(2_940, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -568,8 +589,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `651` // Estimated: `2136` - // Minimum execution time: 40_907_000 picoseconds. - Weight::from_parts(42_566_000, 2136) + // Minimum execution time: 42_429_000 picoseconds. + Weight::from_parts(43_538_000, 2136) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -591,8 +612,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `769` // Estimated: `4698` - // Minimum execution time: 65_209_000 picoseconds. - Weight::from_parts(68_604_000, 4698) + // Minimum execution time: 62_957_000 picoseconds. + Weight::from_parts(66_821_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -602,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 15_860_000 picoseconds. - Weight::from_parts(16_393_000, 3551) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(16_775_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -613,8 +634,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 17_651_000 picoseconds. - Weight::from_parts(18_088_000, 3551) + // Minimum execution time: 17_720_000 picoseconds. + Weight::from_parts(18_916_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -624,8 +645,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 18_576_000 picoseconds. - Weight::from_parts(19_810_000, 3551) + // Minimum execution time: 19_088_000 picoseconds. + Weight::from_parts(19_732_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -641,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 31_015_000 picoseconds. - Weight::from_parts(31_932_000, 4681) + // Minimum execution time: 30_522_000 picoseconds. 
+ Weight::from_parts(31_573_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -660,8 +681,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 36_473_000 picoseconds. - Weight::from_parts(37_382_000, 5996) + // Minimum execution time: 35_833_000 picoseconds. + Weight::from_parts(36_830_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -676,10 +697,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 64_957_000 picoseconds. - Weight::from_parts(66_024_232, 6196) - // Standard Error: 50_170 - .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) + // Minimum execution time: 65_882_000 picoseconds. + Weight::from_parts(67_506_904, 6196) + // Standard Error: 49_386 + .saturating_add(Weight::from_parts(1_197_959, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -691,8 +712,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 39_939_000 picoseconds. - Weight::from_parts(40_788_000, 3593) + // Minimum execution time: 41_860_000 picoseconds. + Weight::from_parts(42_478_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -704,8 +725,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(37_559_000, 3551) + // Minimum execution time: 32_593_000 picoseconds. + Weight::from_parts(35_399_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -719,8 +740,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 42_895_000 picoseconds. - Weight::from_parts(53_945_000, 3533) + // Minimum execution time: 41_934_000 picoseconds. + Weight::from_parts(50_480_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -736,8 +757,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 50_770_000 picoseconds. - Weight::from_parts(63_117_000, 3593) + // Minimum execution time: 47_167_000 picoseconds. + Weight::from_parts(54_289_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -749,18 +770,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 33_396_000 picoseconds. - Weight::from_parts(36_247_000, 4698) + // Minimum execution time: 29_755_000 picoseconds. + Weight::from_parts(32_857_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_625_000 picoseconds. 
- Weight::from_parts(4_011_396, 0) + // Minimum execution time: 3_793_000 picoseconds. + Weight::from_parts(4_086_907, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(60, 0).saturating_mul(n.into())) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -769,13 +792,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_217_000 picoseconds. - Weight::from_parts(6_608_394, 1487) + // Minimum execution time: 6_262_000 picoseconds. + Weight::from_parts(6_734_896, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -786,10 +809,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `972` - // Estimated: `4437` - // Minimum execution time: 46_853_000 picoseconds. - Weight::from_parts(47_740_000, 4437) + // Measured: `829` + // Estimated: `3593` + // Minimum execution time: 39_812_000 picoseconds. + Weight::from_parts(41_227_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -808,8 +831,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 34_240_000 picoseconds. - Weight::from_parts(35_910_175, 8499) + // Minimum execution time: 34_576_000 picoseconds. + Weight::from_parts(36_303_629, 8499) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -821,8 +844,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_083_000 picoseconds. - Weight::from_parts(7_336_000, 3493) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_206_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -834,8 +857,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 15_029_000 picoseconds. - Weight::from_parts(15_567_000, 4681) + // Minimum execution time: 15_063_000 picoseconds. + Weight::from_parts(15_463_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -843,8 +866,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 123_000 picoseconds. - Weight::from_parts(136_000, 0) + // Minimum execution time: 126_000 picoseconds. 
+ Weight::from_parts(157_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -852,8 +875,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_775_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_965_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -862,16 +895,16 @@ impl WeightInfo for () { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 11_859_000 picoseconds. - Weight::from_parts(12_214_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(9_699_000, 1516) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -879,9 +912,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 5_864_000 picoseconds. - Weight::from_parts(6_231_000, 1526) + // Minimum execution time: 5_984_000 picoseconds. + Weight::from_parts(6_296_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. 
+ Weight::from_parts(268_000, 0) + } } diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index 09271632df54b74601d5d318a4749bffcbc86777..d0072e3a476132fb7ea2eeef35d895dc2829dc80 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -16,23 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-bounties = { path = "../bounties", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-bounties = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index d9405d3d28977c2cac3b42a034eb5fcfe8e56e8f..125844fa70e2c8dbcbb528bb77551942c497d75a 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -69,23 +69,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -95,13 +83,8 @@ parameter_types! 
{ impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); @@ -138,6 +121,7 @@ impl pallet_bounties::Config for Test { type MaximumReasonLength = ConstU32<300>; type WeightInfo = (); type ChildBountyManager = ChildBounties; + type OnSlash = (); } impl pallet_child_bounties::Config for Test { type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index d966370238bc4f2c8597555187a1830280752732..6fc4bb7782987f6e32faaabea1e4e661822829bb 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 70363562f6af3c52846cb7103e5834c617ac0ae8..252151fb9193b5f76bfae95187dc9db650cf78be 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,64 +18,64 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -paste = { version = "1.0", default-features = false } -bitflags = "1.3" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +paste = { workspace = true } +bitflags = { workspace = true } +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -smallvec = { version = "1", default-features = false, features = [ +smallvec = { features = [ "const_generics", -] } -wasmi = { version = "0.32.3", default-features = false } -impl-trait-for-tuples = "0.2" +], workspace = true } +wasmi = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Only used in benchmarking to generate contract code -wasm-instrument 
= { version = "0.4", optional = true, default-features = false } -rand = { version = "0.8", optional = true, default-features = false } -rand_pcg = { version = "0.3", optional = true } +wasm-instrument = { optional = true, workspace = true } +rand = { optional = true, workspace = true } +rand_pcg = { optional = true, workspace = true } # Substrate Dependencies -environmental = { version = "1.1.4", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-contracts-uapi = { path = "uapi" } -pallet-contracts-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +environmental = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-contracts-uapi = { workspace = true, default-features = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1" -env_logger = "0.11" -pretty_assertions = "1" -wat = "1" -pallet-contracts-fixtures = { path = "./fixtures" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +env_logger = { workspace = true } +pretty_assertions = { workspace = true } +wat = { workspace = true } +pallet-contracts-fixtures = { workspace = true } # Polkadot Dependencies -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder" } +xcm-builder = { workspace = true, default-features = true } # Substrate Dependencies -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-message-queue = { path = "../message-queue" } -pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } -pallet-utility = { path = "../utility" } -pallet-assets = { path = "../assets" } -pallet-proxy = { path = "../proxy" } -sp-keystore = { path = "../../primitives/keystore" } -sp-tracing = { path = "../../primitives/tracing" } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-proxy = { 
workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 8c93c6f16f66fa2c4a6955607fbd9797c0eda925..6b0751571cc9c1cc8b3284c2ea17a23ca48c5194 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,17 +11,17 @@ description = "Fixtures for testing contracts pallet." workspace = true [dependencies] -frame-system = { path = "../../system" } -sp-runtime = { path = "../../../primitives/runtime" } -anyhow = "1.0.81" +frame-system = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +anyhow = { workspace = true } [build-dependencies] -parity-wasm = "0.45.0" -tempfile = "3.8.1" -toml = "0.8.2" -twox-hash = "1.6.3" +parity-wasm = { workspace = true } +tempfile = { workspace = true } +toml = { workspace = true } +twox-hash = { workspace = true, default-features = true } polkavm-linker = { workspace = true, optional = true } -anyhow = "1.0.81" +anyhow = { workspace = true } [features] riscv = ["polkavm-linker"] diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index a348b7308d1232109a4763dcc764fdac87deee6d..7df5d304c34afa9258492a121ca23feff0318de9 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -12,41 +12,41 @@ description = "A mock network for testing pallet-contracts" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-assets = { path = "../../assets" } -pallet-balances = { path = "../../balances" } -pallet-contracts = { path = ".." 
} -pallet-contracts-uapi = { path = "../uapi", default-features = false } -pallet-contracts-proc-macro = { path = "../proc-macro" } -pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } -pallet-message-queue = { path = "../../message-queue" } -pallet-proxy = { path = "../../proxy" } -pallet-timestamp = { path = "../../timestamp" } -pallet-utility = { path = "../../utility" } -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } -polkadot-primitives = { path = "../../../../polkadot/primitives" } -polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder" } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-simulator = { path = "../../../../polkadot/xcm/xcm-simulator" } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-contracts = { workspace = true, default-features = true } +pallet-contracts-uapi = { workspace = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } +pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true } +xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1" -pretty_assertions = "1" -pallet-contracts-fixtures = { path = "../fixtures" } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +pallet-contracts-fixtures = { workspace = true } [features] default = ["std"] diff --git 
a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs index 20ded0f4a0b8475d2cd203ca4acd8164dd9792eb..34cc95f2eae0e8138a3bb988e0351f1002a04bee 100644 --- a/substrate/frame/contracts/mock-network/src/lib.rs +++ b/substrate/frame/contracts/mock-network/src/lib.rs @@ -112,6 +112,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { (0u128, ALICE, INITIAL_BALANCE), (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), ], + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 4080cd0442dbc516231dc983a3b6609a211523db..3651b172d75fb0c93f9d62980aed0706cbcceeab 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["full"], workspace = true } diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index e9cf28a66912be821de182a707d52d74ecd6c519..47772e0a5a0bc34ff3662cdb1bc5211b44032e40 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -529,7 +529,7 @@ pub mod pallet { } } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] impl frame_system::DefaultConfig for TestDefaultConfig {} #[frame_support::register_default_impl(TestDefaultConfig)] diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index e19caa460419eb1557f9ed2cad9d8112e1270175..cb559ec88d281288dc352255ad1b3e93f200151c 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -12,13 +12,13 @@ description = "Exposes all the host functions that a contract can import." 
workspace = true [dependencies] -paste = { version = "1.0", default-features = false } -bitflags = "1.0" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +paste = { workspace = true } +bitflags = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } +codec = { features = [ "derive", "max-encoded-len", -], optional = true } +], optional = true, workspace = true } [target.'cfg(target_arch = "riscv32")'.dependencies] polkavm-derive = { workspace = true } diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 20de4d858ad62e4e499e5c4ee6786b1f5ee2f6c4..b96809b8aeb4d2a1011c003471f3c918157e60a9 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +assert_matches = { workspace = true } +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-scheduler = { path = "../scheduler" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 0e985e25290fad07961c0a5e5deba4b0148dc1af..78569fb3c9f250d6b5d009936a34a6187639ce1a 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -54,20 +54,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Clone, PartialEq, Eq, Debug)] diff --git 
a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 8773a124cd02accb450daa2218c161563a68d8bf..44290402594a956e58250cf4086d4f0e9cc4400f 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -16,18 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-ranked-collective = { path = "../ranked-collective", default-features = false, optional = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index b3ee3ab7d165fd87710b207ab4668bba450e2189..34461e062b35cc73c727e81fcdb9ab9081c2580c 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -85,6 +85,45 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn set_partial_params() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); + + // Set up the initial default state for the Params storage + let params = ParamsType { + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + }; + CoreFellowship::::set_params(RawOrigin::Root.into(), Box::new(params))?; + + let default_params = Params::::get(); + let expected_params = ParamsType { + active_salary: default_params.active_salary, + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: default_params.demotion_period, + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + }; + + let params_payload = ParamsType { + active_salary: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![Some(10u32.into()); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + min_promotion_period: 
BoundedVec::try_from(vec![Some(100u32.into()); max_rank]) + .unwrap(), + offboard_timeout: None, + }; + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(params_payload.clone())); + + assert_eq!(Params::::get(), expected_params); + Ok(()) + } + #[benchmark] fn bump_offboard() -> Result<(), BenchmarkError> { set_benchmark_params::()?; @@ -171,6 +210,22 @@ mod benchmarks { Ok(()) } + /// Benchmark the `promote_fast` extrinsic to promote someone up to `r`. + #[benchmark] + fn promote_fast(r: Linear<1, { T::MaxRank::get() as u32 }>) -> Result<(), BenchmarkError> { + let r = r.try_into().expect("r is too large"); + let member = make_member::(0)?; + + ensure_evidence::(&member)?; + + #[extrinsic_call] + _(RawOrigin::Root, member.clone(), r); + + assert_eq!(T::Members::rank_of(&member), Some(r)); + assert!(!MemberEvidence::::contains_key(&member)); + Ok(()) + } + #[benchmark] fn offboard() -> Result<(), BenchmarkError> { let member = make_member::(0)?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index 94339b85d0524a297241248312e92b31e761149c..5ed2562d6d019b9a85e3974005831861a3e37887 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -209,6 +209,10 @@ pub mod pallet { /// rank to which it can promote. type PromoteOrigin: EnsureOrigin>; + /// The origin that has permission to "fast" promote a member by ignoring promotion periods + /// and skipping ranks. The `Success` value is the maximum rank to which it can promote. + type FastPromoteOrigin: EnsureOrigin>; + /// The maximum size in bytes submitted evidence is allowed to be. #[pallet::constant] type EvidenceSize: Get; @@ -222,6 +226,11 @@ pub mod pallet { pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, >::MaxRank>; + pub type PartialParamsOf = ParamsType< + Option<>::Balance>, + Option>, + >::MaxRank, + >; pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; @@ -493,6 +502,44 @@ pub mod pallet { Ok(()) } + /// Fast promotions can skip ranks and ignore the `min_promotion_period`. + /// + /// This is useful for out-of-band promotions, hence it has its own `FastPromoteOrigin` to + /// be (possibly) more restrictive than `PromoteOrigin`. Note that the member must already + /// be inducted. + #[pallet::weight(T::WeightInfo::promote_fast(*to_rank as u32))] + #[pallet::call_index(10)] + pub fn promote_fast( + origin: OriginFor, + who: T::AccountId, + to_rank: RankOf, + ) -> DispatchResult { + match T::FastPromoteOrigin::try_origin(origin) { + Ok(allow_rank) => ensure!(allow_rank >= to_rank, Error::::NoPermission), + Err(origin) => ensure_root(origin)?, + } + ensure!(to_rank as u32 <= T::MaxRank::get(), Error::::InvalidRank); + let curr_rank = T::Members::rank_of(&who).ok_or(Error::::Unranked)?; + ensure!(to_rank > curr_rank, Error::::UnexpectedRank); + + let mut member = Member::::get(&who).ok_or(Error::::NotTracked)?; + let now = frame_system::Pallet::::block_number(); + member.last_promotion = now; + member.last_proof = now; + + for rank in (curr_rank + 1)..=to_rank { + T::Members::promote(&who)?; + + // NOTE: We could factor this out, but it would destroy our invariants: + Member::::insert(&who, &member); + + Self::dispose_evidence(who.clone(), rank.saturating_sub(1), Some(rank)); + Self::deposit_event(Event::::Promoted { who: who.clone(), to_rank: rank }); + } + + Ok(()) + } + /// Stop tracking a prior member who is now not a ranked member of the collective. 
/// /// - `origin`: A `Signed` origin of an account. @@ -558,9 +605,59 @@ impl, I: 'static> Pallet { Ok(Pays::No.into()) } + + /// Set the parameters partially. + /// + /// - `origin`: An origin complying with `ParamsOrigin` or root. + /// - `partial_params`: The new parameters for the pallet. + /// + /// This updates the config with multiple arguments without duplicating + /// the fields that do not need to be updated (set them to None). + #[pallet::weight(T::WeightInfo::set_partial_params())] + #[pallet::call_index(9)] + pub fn set_partial_params( + origin: OriginFor, + partial_params: Box>, + ) -> DispatchResult { + T::ParamsOrigin::ensure_origin_or_root(origin)?; + let params = Params::::mutate(|p| { + Self::set_partial_params_slice(&mut p.active_salary, partial_params.active_salary); + Self::set_partial_params_slice( + &mut p.passive_salary, + partial_params.passive_salary, + ); + Self::set_partial_params_slice( + &mut p.demotion_period, + partial_params.demotion_period, + ); + Self::set_partial_params_slice( + &mut p.min_promotion_period, + partial_params.min_promotion_period, + ); + if let Some(new_offboard_timeout) = partial_params.offboard_timeout { + p.offboard_timeout = new_offboard_timeout; + } + p.clone() + }); + Self::deposit_event(Event::::ParamsChanged { params }); + Ok(()) + } } impl, I: 'static> Pallet { + /// Partially update the base slice with a new slice. + /// + /// Only elements in the base slice which have a new value in the new slice will be updated. + pub(crate) fn set_partial_params_slice( + base_slice: &mut BoundedVec>::MaxRank>, + new_slice: BoundedVec, >::MaxRank>, + ) { + for (base_element, new_element) in base_slice.iter_mut().zip(new_slice) { + if let Some(element) = new_element { + *base_element = element; + } + } + } /// Convert a rank into a `0..RANK_COUNT` index suitable for the arrays in Params. /// /// Rank 1 becomes index 0, rank `RANK_COUNT` becomes index `RANK_COUNT - 1`.
Any rank not diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index f3137316658576d91616428ee32d51b916189963..bcf70c7beb102c49f86a583e9e4c476aa216275e 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -78,6 +78,7 @@ impl Config for Test { type InductOrigin = EnsureInducted; type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = EvidenceSize; type MaxRank = ConstU32<9>; } @@ -157,6 +158,7 @@ impl pallet_ranked_collective::Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = CoreFellowship; type VoteWeight = Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = CoreFellowship; } diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs index 9245e5159a901d47842339836ae3f0767a2b1b1f..11d1ea9fe5b7563f16d6d9c7118071ec2b1a2e92 100644 --- a/substrate/frame/core-fellowship/src/tests/unit.rs +++ b/substrate/frame/core-fellowship/src/tests/unit.rs @@ -21,7 +21,7 @@ use std::collections::BTreeMap; use core::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, derive_impl, ord_parameter_types, + assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types, pallet_prelude::Weight, parameter_types, traits::{tokens::GetSalary, ConstU32, IsInVec, TryMapSuccess}, @@ -115,6 +115,7 @@ impl Config for Test { type InductOrigin = EnsureInducted; type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<1024>; type MaxRank = ConstU32<9>; } @@ -187,6 +188,40 @@ fn set_params_works() { }); } +#[test] +fn set_partial_params_works() { + new_test_ext().execute_with(|| { + let params = ParamsType { + active_salary: bounded_vec![None; 9], + passive_salary: bounded_vec![None; 9], + demotion_period: bounded_vec![None, Some(10), None, None, None, None, None, None, None], + min_promotion_period: bounded_vec![None; 9], + offboard_timeout: Some(2), + }; + assert_noop!( + CoreFellowship::set_partial_params(signed(2), Box::new(params.clone())), + DispatchError::BadOrigin + ); + assert_ok!(CoreFellowship::set_partial_params(signed(1), Box::new(params))); + + // Update params from the base params value declared in `new_test_ext` + let raw_updated_params = ParamsType { + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 10, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], + offboard_timeout: 2, + }; + // Updated params stored in Params storage value + let updated_params = Params::::get(); + assert_eq!(raw_updated_params, updated_params); + + System::assert_last_event( + Event::::ParamsChanged { params: updated_params }.into(), + ); + }); +} + #[test] fn induct_works() { new_test_ext().execute_with(|| { @@ -222,6 +257,99 @@ fn promote_works() { }); } +#[test] +fn promote_fast_works() { + let alice = 1; + + new_test_ext().execute_with(|| { + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::Unranked + ); + set_rank(alice, 0); + assert_noop!( + 
CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::NotTracked + ); + assert_ok!(CoreFellowship::import(signed(alice))); + + // Cannot fast promote to the same rank: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 0), + Error::::UnexpectedRank + ); + assert_ok!(CoreFellowship::promote_fast(signed(alice), alice, 1)); + assert_eq!(TestClub::rank_of(&alice), Some(1)); + + // Cannot promote normally because of the period: + assert_noop!(CoreFellowship::promote(signed(2), alice, 2), Error::::TooSoon); + // But can fast promote: + assert_ok!(CoreFellowship::promote_fast(signed(2), alice, 2)); + assert_eq!(TestClub::rank_of(&alice), Some(2)); + + // Cannot promote to lower rank: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 0), + Error::::UnexpectedRank + ); + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::UnexpectedRank + ); + // Permission is checked: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 2), + Error::::NoPermission + ); + + // Can fast promote up to the maximum: + assert_ok!(CoreFellowship::promote_fast(signed(9), alice, 9)); + // But not past the maximum: + assert_noop!( + CoreFellowship::promote_fast(RuntimeOrigin::root(), alice, 10), + Error::::InvalidRank + ); + }); +} + +/// Compare the storage root hashes of a normal promote and a fast promote. +#[test] +fn promote_fast_identical_to_promote() { + let alice = 1; + + new_test_ext().execute_with(|| { + set_rank(alice, 0); + assert_eq!(TestClub::rank_of(&alice), Some(0)); + assert_ok!(CoreFellowship::import(signed(alice))); + run_to(3); + assert_eq!(TestClub::rank_of(&alice), Some(0)); + assert_ok!(CoreFellowship::submit_evidence( + signed(alice), + Wish::Promotion, + bounded_vec![0; 1024] + )); + + let root_promote = hypothetically!({ + assert_ok!(CoreFellowship::promote(signed(alice), alice, 1)); + // Don't clean the events since they should emit the same events: + sp_io::storage::root(sp_runtime::StateVersion::V1) + }); + + // This is using thread locals instead of storage... + TestClub::demote(&alice).unwrap(); + + let root_promote_fast = hypothetically!({ + assert_ok!(CoreFellowship::promote_fast(signed(alice), alice, 1)); + + sp_io::storage::root(sp_runtime::StateVersion::V1) + }); + + assert_eq!(root_promote, root_promote_fast); + // Ensure that we don't compare trivial stuff like `()` from a type error above. + assert_eq!(root_promote.len(), 32); + }); +} + #[test] fn sync_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 8fad6f585c1129e9a6c4160ed396cea28158e990..5e64600b662b948dee4cbe00f588aaff5f8a76fd 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -50,11 +50,13 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_core_fellowship`. pub trait WeightInfo { fn set_params() -> Weight; + fn set_partial_params() -> Weight; fn bump_offboard() -> Weight; fn bump_demote() -> Weight; fn set_active() -> Weight; fn induct() -> Weight; fn promote() -> Weight; + fn promote_fast(r: u32, ) -> Weight; fn offboard() -> Weight; fn import() -> Weight; fn approve() -> Weight; @@ -70,8 +72,19 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_633_000 picoseconds. - Weight::from_parts(8_018_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `CoreFellowship::Params` (r:1 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `1853` + // Minimum execution time: 10_050_000 picoseconds. + Weight::from_parts(10_244_000, 1853) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -92,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 57_597_000 picoseconds. - Weight::from_parts(58_825_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. + Weight::from_parts(55_650_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -115,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 61_387_000 picoseconds. - Weight::from_parts(63_408_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -128,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 15_941_000 picoseconds. - Weight::from_parts(16_547_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -147,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_963_000 picoseconds. - Weight::from_parts(25_873_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -170,11 +183,38 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 55_062_000 picoseconds. - Weight::from_parts(58_422_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. 
+ Weight::from_parts(52_810_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:9 w:9) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:9) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:9) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 9]`. + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `RankedCollective::Members` (r:1 w:0) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -185,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 15_901_000 picoseconds. - Weight::from_parts(16_746_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -198,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_421_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -213,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 36_925_000 picoseconds. - Weight::from_parts(38_330_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. 
+ Weight::from_parts(35_162_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -226,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_210_000 picoseconds. - Weight::from_parts(26_247_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,8 +281,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_633_000 picoseconds. - Weight::from_parts(8_018_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `CoreFellowship::Params` (r:1 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `1853` + // Minimum execution time: 10_050_000 picoseconds. + Weight::from_parts(10_244_000, 1853) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -263,8 +314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 57_597_000 picoseconds. - Weight::from_parts(58_825_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. + Weight::from_parts(55_650_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -286,8 +337,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 61_387_000 picoseconds. - Weight::from_parts(63_408_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -299,8 +350,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 15_941_000 picoseconds. - Weight::from_parts(16_547_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -318,8 +369,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_963_000 picoseconds. - Weight::from_parts(25_873_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -341,11 +392,38 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 55_062_000 picoseconds. - Weight::from_parts(58_422_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. 
+ Weight::from_parts(52_810_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:9 w:9) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:9) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:9) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 9]`. + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `RankedCollective::Members` (r:1 w:0) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -356,8 +434,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 15_901_000 picoseconds. - Weight::from_parts(16_746_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -369,8 +447,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_421_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -384,8 +462,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 36_925_000 picoseconds. - Weight::from_parts(38_330_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. + Weight::from_parts(35_162_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -397,8 +475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_210_000 picoseconds. 
- Weight::from_parts(26_247_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml index 3b122dc2e26c3c9d0f56f07427b037bd1cd388c0..0c1bcf0df0c5e3422367d64ec3c3d21e85f50e75 100644 --- a/substrate/frame/delegated-staking/Cargo.toml +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -12,25 +12,25 @@ description = "FRAME delegated staking pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -substrate-test-utils = { path = "../../test-utils" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-staking = { path = "../staking" } -pallet-nomination-pools = { path = "../nomination-pools" } -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 4b924bce3a579ace305d998cc1db84dee82f78bf..f16bb0d1dc08da50f7c936726b107c7ad35b3fc1 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -823,10 +823,6 @@ impl Pallet { ) -> Result<(), sp_runtime::TryRuntimeError> { let mut delegation_aggregation = BTreeMap::>::new(); for (delegator, delegation) in delegations.iter() { - ensure!( - T::CoreStaking::status(delegator).is_err(), - "delegator should not be directly staked" - ); ensure!(!Self::is_agent(delegator), "delegator cannot be an agent"); delegation_aggregation diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index 
c1875055f2fec78ea80c908bb11379291efad4a8..811d5739f4e98f24c30c7514455dac049fbbc421 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, Currency}, + traits::{ConstU64, Currency, VariantCountOf}, PalletId, }; @@ -44,7 +44,7 @@ pub const GENESIS_VALIDATOR: AccountId = 1; pub const GENESIS_NOMINATOR_ONE: AccountId = 101; pub const GENESIS_NOMINATOR_TWO: AccountId = 102; -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -64,19 +64,14 @@ pub type Balance = u128; parameter_types! { pub static ExistentialDeposit: Balance = 1; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; + type MaxFreezes = VariantCountOf; type RuntimeFreezeReason = RuntimeFreezeReason; } @@ -93,7 +88,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; - pub static BondingDuration: u32 = 3; pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -106,35 +100,17 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = ConstU32<1>; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<10>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index d40539d40dddac6c517691bef58a41509949f421..385bb17ddadbdf8f5a6bc2748a767d6ec72bfc5b 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -21,7 +21,7 @@ use super::*; use crate::mock::*; use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold}; use pallet_nomination_pools::{Error as PoolsError, Event as PoolsEvent}; -use pallet_staking::Error as StakingError; +use pallet_staking::{Error as StakingError, RewardDestination}; use sp_staking::{Agent, DelegationInterface, Delegator, StakerStatus}; #[test] @@ -337,7 +337,6 @@ fn apply_pending_slash() { /// Integration tests with pallet-staking. mod staking_integration { use super::*; - use pallet_staking::RewardDestination; use sp_staking::Stake; #[test] @@ -501,17 +500,17 @@ mod staking_integration { ExtBuilder::default().build_and_execute(|| { start_era(1); let agent = 200; - setup_delegation_stake(agent, 201, (300..350).collect(), 100, 0); + setup_delegation_stake(agent, 201, (300..350).collect(), 320, 0); // verify withdraw not possible yet assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 100, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 320, 0), Error::::NotEnoughFunds ); // fill up unlocking chunks in core staking. - // 10 is the max chunks - for i in 2..=11 { + // 32 is the max chunks + for i in 2..=33 { start_era(i); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); // no withdrawals from core staking yet. @@ -519,35 +518,35 @@ mod staking_integration { } // another unbond would trigger withdrawal - start_era(12); + start_era(34); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); - // 8 previous unbonds would be withdrawn as they were already unlocked. Unlocking period - // is 3 eras. - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 8 * 10); + // 30 previous unbonds would be withdrawn as they were already unlocked. Unlocking + // period is 3 eras. + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 30 * 10); // release some delegation now. assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 160, 0 )); - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 80 - 40); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 300 - 160); // cannot release more than available assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 50, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 141, 0), Error::::NotEnoughFunds ); assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 140, 0 )); - assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 100 - 80); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 320 - 300); }); } @@ -1217,6 +1216,46 @@ mod pool_integration { }); } + #[test] + fn existing_pool_member_can_stake() { + // A pool member is able to stake directly since staking only uses free funds but once a + // staker, they cannot join/add extra bond to the pool. They can still withdraw funds. + ExtBuilder::default().build_and_execute(|| { + start_era(1); + // GIVEN: a pool. 
+ fund(&200, 1000); + let pool_id = create_pool(200, 800); + + // WHEN: delegator joins a pool + let delegator = 100; + fund(&delegator, 1000); + assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), 200, pool_id)); + + // THEN: they can still stake directly. + assert_ok!(Staking::bond( + RuntimeOrigin::signed(delegator), + 500, + RewardDestination::Account(101) + )); + assert_ok!(Staking::nominate( + RuntimeOrigin::signed(delegator), + vec![GENESIS_VALIDATOR] + )); + + // The delegator cannot add any extra bond to the pool anymore. + assert_noop!( + Pools::bond_extra(RawOrigin::Signed(delegator).into(), BondExtra::FreeBalance(100)), + Error::::AlreadyStaking + ); + + // But they can unbond + assert_ok!(Pools::unbond(RawOrigin::Signed(delegator).into(), delegator, 50)); + // and withdraw + start_era(4); + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(delegator).into(), delegator, 0)); + }); + } + fn create_pool(creator: AccountId, amount: Balance) -> u32 { fund(&creator, amount * 2); assert_ok!(Pools::create( diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 7f182447ead61a0b694b7dc21a888fd4480e1197..6a44951e09c4bbf1a64ccff0b60459aba5865193 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-scheduler = { path = "../scheduler" } -pallet-preimage = { path = "../preimage" } +pallet-balances = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 9303c0da504f366288f6f06cb33c54abbd125321..7d7066c8af691e7f7a8fe5cd8bfb7dec21d57e9b 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -108,20 +108,9 @@ impl pallet_scheduler::Config for Test { type Preimages = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - 
type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static PreimageByteDeposit: u64 = 0; diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 43e3e7079d2fa78aa1e3515b41df87446ac5b370..941d720f2c6ca869cb8fdae55a27f009ad56e95a 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -15,40 +15,40 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-io = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-npos-elections = { workspace = true } +sp-arithmetic = { workspace = true } +frame-election-provider-support = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } -rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.26.2", default-features = false, features = ["derive"], optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-election-provider-support-benchmarking = { optional = true, workspace = true } +rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } +strum = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] -parking_lot = "0.12.1" -rand = "0.8.5" -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-tracing = { path = "../../primitives/tracing" } -pallet-balances = { path = "../balances" } -frame-benchmarking = { path = "../benchmarking" } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-io = { workspace = true, default-features = 
true } +sp-npos-elections = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 92b87d92e99b16c959e9d719e91251da94fa7576..4532185b959c0ca59e6cfd4782f49b3773928b36 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -237,7 +237,6 @@ impl frame_system::Config for Runtime { const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { - pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), @@ -245,20 +244,9 @@ parameter_types! { ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index fc696e04d689f706a4afa4459ea2b8f885ca22d4..77ecbb1af98f17a6f59351966ff855d5dcde9bbd 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -16,30 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +parking_lot = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true } +sp-tracing = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = 
true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-election-provider-multi-phase = { path = ".." } -pallet-staking = { path = "../../staking" } -pallet-nomination-pools = { path = "../../nomination-pools" } -pallet-bags-list = { path = "../../bags-list" } -pallet-balances = { path = "../../balances" } -pallet-timestamp = { path = "../../timestamp" } -pallet-session = { path = "../../session" } +pallet-election-provider-multi-phase = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-session = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 2b1f1335c6fe888c3ff756b00327b74d5ba9f279..aaffbb6681cd26b0bfb6af7c7a8d9e27fe5f06fa 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -22,6 +22,7 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; +use pallet_timestamp::Now; use sp_core::Get; use sp_runtime::Perbill; @@ -46,7 +47,7 @@ fn log_current_time() { Session::current_index(), Staking::current_era(), ElectionProviderMultiPhase::current_phase(), - Timestamp::now() + Now::::get() ); } @@ -209,7 +210,7 @@ fn continuous_slashes_below_offending_threshold() { // failed due to election minimum score. if start_next_active_era(pool_state.clone()).is_err() { assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - break + break; } active_validator_set = Session::validators(); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index e5987ec33f06c3263edca88cf0fe7fbdda4ddcf5..bb1bdb31420565a104fb58b553b211411d7a3520 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -19,7 +19,7 @@ use frame_support::{ assert_ok, parameter_types, traits, - traits::{Hooks, UnfilteredDispatchable}, + traits::{Hooks, UnfilteredDispatchable, VariantCountOf}, weights::constants, }; use frame_system::EnsureRoot; @@ -102,20 +102,14 @@ parameter_types! { ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxFreezes = traits::ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; - type WeightInfo = (); } impl pallet_timestamp::Config for Runtime { @@ -235,7 +229,6 @@ parameter_types! 
{ pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. - pub HistoryDepth: u32 = 84; } impl pallet_bags_list::Config for Runtime { @@ -291,15 +284,11 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16; /// Disabling factor set explicitly to byzantine threshold pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); // burn slashes - type Reward = (); // rewards are minted from the void type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; @@ -314,12 +303,10 @@ impl pallet_staking::Config for Runtime { type NominationsQuota = pallet_staking::FixedNominationsQuota; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = MaxUnlockingChunks; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = HistoryDepth; type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } impl frame_system::offchain::SendTransactionTypes for Runtime diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 1c63f90720f7b010fb584dc44f2e84ad78c61021..012392ffc59d9644181902ac3f5afbb6d444868f 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -15,21 +15,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-election-provider-solution-type = { path = "solution-type" } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-election-provider-solution-type = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } [dev-dependencies] -rand = { version = "0.8.5", features = ["small_rng"] } -sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections" } +rand = { features = ["small_rng"], workspace = true, default-features 
= true }
+sp-io = { workspace = true, default-features = true }
+sp-npos-elections = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml
index c2e644cfefab99145b839bf1c57917369a5f5cb2..ecf41ff9663bb94fb9feffa1991d6a90438d3952 100644
--- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml
+++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml
@@ -15,15 +15,15 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
+codec = { features = [
 	"derive",
-] }
-frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true }
-frame-election-provider-support = { path = "..", default-features = false }
-frame-system = { path = "../../system", default-features = false }
-sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false }
-sp-runtime = { path = "../../../primitives/runtime", default-features = false }
-sp-std = { path = "../../../primitives/std", default-features = false }
+], workspace = true }
+frame-benchmarking = { optional = true, workspace = true }
+frame-election-provider-support = { workspace = true }
+frame-system = { workspace = true }
+sp-npos-elections = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml
index 0b631bd7bb035345a909a8911f4511f05d1b4f9b..a254f6c9b5b723065530fe53b4e045e1dfe94b92 100644
--- a/substrate/frame/election-provider-support/solution-type/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml
@@ -20,14 +20,14 @@ proc-macro = true
 [dependencies]
 syn = { features = ["full", "visit"], workspace = true }
 quote = { workspace = true }
-proc-macro2 = "1.0.56"
-proc-macro-crate = "3.0.0"
+proc-macro2 = { workspace = true }
+proc-macro-crate = { workspace = true }
 
 [dev-dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12" }
-scale-info = "2.11.1"
-sp-arithmetic = { path = "../../../primitives/arithmetic" }
+codec = { workspace = true, default-features = true }
+scale-info = { workspace = true, default-features = true }
+sp-arithmetic = { workspace = true, default-features = true }
 # used by generate_solution_type:
-frame-election-provider-support = { path = ".." }
-frame-support = { path = "../../support" }
-trybuild = "1.0.88"
+frame-election-provider-support = { workspace = true, default-features = true }
+frame-support = { workspace = true, default-features = true }
+trybuild = { workspace = true }
diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
index 98da507384fd956e21a01cf075556bbe0b6b916b..2c7a7aea1ca2b16982a69ed1e7af838256734056 100644
--- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
@@ -16,19 +16,19 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-clap = { version = "4.5.3", features = ["derive"] }
-honggfuzz = "0.5"
-rand = { version = "0.8", features = ["small_rng", "std"] }
+clap = { features = ["derive"], workspace = true }
+honggfuzz = { workspace = true }
+rand = { features = ["small_rng", "std"], workspace = true, default-features = true }
 
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
-scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
-frame-election-provider-solution-type = { path = ".." }
-frame-election-provider-support = { path = "../.." }
-sp-arithmetic = { path = "../../../../primitives/arithmetic" }
-sp-runtime = { path = "../../../../primitives/runtime" }
+codec = { features = ["derive"], workspace = true }
+scale-info = { features = ["derive"], workspace = true }
+frame-election-provider-solution-type = { workspace = true, default-features = true }
+frame-election-provider-support = { workspace = true, default-features = true }
+sp-arithmetic = { workspace = true, default-features = true }
+sp-runtime = { workspace = true, default-features = true }
 # used by generate_solution_type:
-sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false }
-frame-support = { path = "../../../support" }
+sp-npos-elections = { workspace = true }
+frame-support = { workspace = true, default-features = true }
 
 [[bin]]
 name = "compact"
diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml
index dbcb740518b1379f54cd4265973b445f53109b7b..b53118b6b6fd2d070427615968fbfa43ad0bf920 100644
--- a/substrate/frame/elections-phragmen/Cargo.toml
+++ b/substrate/frame/elections-phragmen/Cargo.toml
@@ -16,26 +16,26 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
+codec = { features = [
 	"derive",
-] }
+], workspace = true }
 log = { workspace = true }
-scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
-frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
-frame-support = { path = "../support", default-features = false }
-frame-system = { path = "../system", default-features = false }
-sp-core = { path = "../../primitives/core", default-features = false }
-sp-io = { path = "../../primitives/io", default-features = false }
-sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false }
-sp-runtime = { path = "../../primitives/runtime", default-features = false }
-sp-std = { path = "../../primitives/std", default-features = false }
-sp-staking = { path = "../../primitives/staking", default-features =
false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-utils = { path = "../../test-utils" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 55bb1b968fa1b11d0a34761b920b97bd7b20bae1..8e762f667b2a6a0d3b58916e93e91bb8fc56a67b 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -56,7 +56,7 @@ fn default_stake(num_votes: u32) -> BalanceOf { /// Get the current number of candidates. fn candidate_count() -> u32 { - >::decode_len().unwrap_or(0usize) as u32 + Candidates::::decode_len().unwrap_or(0usize) as u32 } /// Add `c` new candidates. @@ -67,7 +67,7 @@ fn submit_candidates( (0..c) .map(|i| { let account = endowed_account::(prefix, i); - >::submit_candidacy( + Elections::::submit_candidacy( RawOrigin::Signed(account.clone()).into(), candidate_count::(), ) @@ -96,7 +96,7 @@ fn submit_voter( votes: Vec, stake: BalanceOf, ) -> DispatchResultWithPostInfo { - >::vote(RawOrigin::Signed(caller).into(), votes, stake) + Elections::::vote(RawOrigin::Signed(caller).into(), votes, stake) } /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if @@ -121,28 +121,28 @@ fn distribute_voters( /// members, or members and runners-up. fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; - assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); - >::do_phragmen(); - assert_eq!(>::candidates().len(), 0, "some candidates remaining."); + assert_eq!(Candidates::::get().len() as u32, m, "wrong number of candidates."); + Elections::::do_phragmen(); + assert_eq!(Candidates::::get().len(), 0, "some candidates remaining."); assert_eq!( - >::members().len() + >::runners_up().len(), + Members::::get().len() + RunnersUp::::get().len(), m as usize, "wrong number of members and runners-up", ); - Ok(>::members() + Ok(Members::::get() .into_iter() .map(|m| m.who) - .chain(>::runners_up().into_iter().map(|r| r.who)) + .chain(RunnersUp::::get().into_iter().map(|r| r.who)) .collect()) } /// removes all the storage items to reverse any genesis state. fn clean() { - >::kill(); - >::kill(); - >::kill(); + Members::::kill(); + Candidates::::kill(); + RunnersUp::::kill(); #[allow(deprecated)] - >::remove_all(None); + Voting::::remove_all(None); } benchmarks! { @@ -180,14 +180,14 @@ benchmarks! { // original votes. let mut votes = all_candidates.iter().skip(1).cloned().collect::>(); - submit_voter::(caller.clone(), votes.clone(), stake / >::from(10u32))?; + submit_voter::(caller.clone(), votes.clone(), stake / BalanceOf::::from(10u32))?; // new votes. 
votes = all_candidates; - assert!(votes.len() > >::get(caller.clone()).votes.len()); + assert!(votes.len() > Voting::::get(caller.clone()).votes.len()); whitelist!(caller); - }: vote(RawOrigin::Signed(caller), votes, stake / >::from(10u32)) + }: vote(RawOrigin::Signed(caller), votes, stake / BalanceOf::::from(10u32)) vote_less { let v in 2 .. T::MaxVotesPerVoter::get(); @@ -205,7 +205,7 @@ benchmarks! { // new votes. votes = votes.into_iter().skip(1).collect::>(); - assert!(votes.len() < >::get(caller.clone()).votes.len()); + assert!(votes.len() < Voting::::get(caller.clone()).votes.len()); whitelist!(caller); }: vote(RawOrigin::Signed(caller), votes, stake) @@ -294,7 +294,7 @@ benchmarks! { let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[0].clone(); - assert!(>::is_member(&bailing)); + assert!(Elections::::is_member(&bailing)); whitelist!(bailing); }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::Member) @@ -318,7 +318,7 @@ benchmarks! { let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[T::DesiredMembers::get() as usize + 1].clone(); - assert!(>::is_runner_up(&bailing)); + assert!(Elections::::is_runner_up(&bailing)); whitelist!(bailing); }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::RunnerUp) @@ -345,11 +345,11 @@ benchmarks! { clean::(); let _ = fill_seats_up_to::(m)?; - let removing = as_lookup::(>::members_ids()[0].clone()); + let removing = as_lookup::(Elections::::members_ids()[0].clone()); }: remove_member(RawOrigin::Root, removing, true, false) verify { // must still have enough members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); + assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get()); #[cfg(test)] { // reset members in between benchmark tests. @@ -371,15 +371,15 @@ benchmarks! { distribute_voters::(all_candidates, v, T::MaxVotesPerVoter::get() as usize)?; // all candidates leave. - >::kill(); + Candidates::::kill(); // now everyone is defunct - assert!(>::iter().all(|(_, v)| >::is_defunct_voter(&v.votes))); - assert_eq!(>::iter().count() as u32, v); + assert!(Voting::::iter().all(|(_, v)| Elections::::is_defunct_voter(&v.votes))); + assert_eq!(Voting::::iter().count() as u32, v); let root = RawOrigin::Root; }: _(root, v, d) verify { - assert_eq!(>::iter().count() as u32, v - d); + assert_eq!(Voting::::iter().count() as u32, v - d); } election_phragmen { @@ -404,12 +404,12 @@ benchmarks! 
{ let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; }: { - >::on_initialize(T::TermDuration::get()); + Elections::::on_initialize(T::TermDuration::get()); } verify { - assert_eq!(>::members().len() as u32, T::DesiredMembers::get().min(c)); + assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get().min(c)); assert_eq!( - >::runners_up().len() as u32, + RunnersUp::::get().len() as u32, T::DesiredRunnersUp::get().min(c.saturating_sub(T::DesiredMembers::get())), ); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index b4be07030efbbf3d52405320830f86f524c9b0ad..c6eca40c47182321d5846d38f07408767fddd6f0 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -377,9 +377,9 @@ pub mod pallet { ); ensure!(!votes.is_empty(), Error::::NoVotes); - let candidates_count = >::decode_len().unwrap_or(0); - let members_count = >::decode_len().unwrap_or(0); - let runners_up_count = >::decode_len().unwrap_or(0); + let candidates_count = Candidates::::decode_len().unwrap_or(0); + let members_count = Members::::decode_len().unwrap_or(0); + let runners_up_count = RunnersUp::::decode_len().unwrap_or(0); // can never submit a vote of there are no members, and cannot submit more votes than // all potential vote targets. @@ -393,7 +393,7 @@ pub mod pallet { // Reserve bond. let new_deposit = Self::deposit_of(votes.len()); - let Voter { deposit: old_deposit, .. } = >::get(&who); + let Voter { deposit: old_deposit, .. } = Voting::::get(&who); match new_deposit.cmp(&old_deposit) { Ordering::Greater => { // Must reserve a bit more. @@ -455,7 +455,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let actual_count = >::decode_len().unwrap_or(0) as u32; + let actual_count = Candidates::::decode_len().unwrap_or(0) as u32; ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); ensure!( actual_count <= ::MaxCandidates::get(), @@ -470,7 +470,7 @@ pub mod pallet { T::Currency::reserve(&who, T::CandidacyBond::get()) .map_err(|_| Error::::InsufficientCandidateFunds)?; - >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); + Candidates::::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); Ok(()) } @@ -509,7 +509,7 @@ pub mod pallet { Self::deposit_event(Event::Renounced { candidate: who }); }, Renouncing::RunnerUp => { - >::try_mutate::<_, Error, _>(|runners_up| { + RunnersUp::::try_mutate::<_, Error, _>(|runners_up| { let index = runners_up .iter() .position(|SeatHolder { who: r, .. }| r == &who) @@ -523,7 +523,7 @@ pub mod pallet { })?; }, Renouncing::Candidate(count) => { - >::try_mutate::<_, Error, _>(|candidates| { + Candidates::::try_mutate::<_, Error, _>(|candidates| { ensure!(count >= candidates.len() as u32, Error::::InvalidWitnessData); let index = candidates .binary_search_by(|(c, _)| c.cmp(&who)) @@ -599,7 +599,7 @@ pub mod pallet { ) -> DispatchResult { let _ = ensure_root(origin)?; - >::iter() + Voting::::iter() .take(num_voters as usize) .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) .take(num_defunct as usize) @@ -682,7 +682,6 @@ pub mod pallet { /// /// Invariant: Always sorted based on account id. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members = StorageValue<_, Vec>>, ValueQuery>; @@ -691,7 +690,6 @@ pub mod pallet { /// Invariant: Always sorted based on rank (worse to best). 
Upon removal of a member, the /// last (i.e. _best_) runner-up will be replaced. #[pallet::storage] - #[pallet::getter(fn runners_up)] pub type RunnersUp = StorageValue<_, Vec>>, ValueQuery>; @@ -702,19 +700,16 @@ pub mod pallet { /// /// Invariant: Always sorted based on account id. #[pallet::storage] - #[pallet::getter(fn candidates)] pub type Candidates = StorageValue<_, Vec<(T::AccountId, BalanceOf)>, ValueQuery>; /// The total number of vote rounds that have happened, excluding the upcoming one. #[pallet::storage] - #[pallet::getter(fn election_rounds)] pub type ElectionRounds = StorageValue<_, u32, ValueQuery>; /// Votes and locked stake of a particular voter. /// /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. #[pallet::storage] - #[pallet::getter(fn voting)] pub type Voting = StorageMap<_, Twox64Concat, T::AccountId, Voter>, ValueQuery>; @@ -768,7 +763,7 @@ pub mod pallet { // they have any lock. NOTE: this means that we will still try to remove a lock // once this genesis voter is removed, and for now it is okay because // remove_lock is noop if lock is not there. - >::insert( + Voting::::insert( &member, Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, ); @@ -811,7 +806,7 @@ impl Pallet { // - `Ok(Option(replacement))` if member was removed and replacement was replaced. // - `Ok(None)` if member was removed but no replacement was found // - `Err(_)` if who is not a member. - let maybe_replacement = >::try_mutate::<_, Error, _>(|members| { + let maybe_replacement = Members::::try_mutate::<_, Error, _>(|members| { let remove_index = members .binary_search_by(|m| m.who.cmp(who)) .map_err(|_| Error::::NotMember)?; @@ -831,7 +826,7 @@ impl Pallet { T::Currency::unreserve(who, removed.deposit); } - let maybe_next_best = >::mutate(|r| r.pop()).map(|next_best| { + let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).map(|next_best| { // defensive-only: Members and runners-up are disjoint. This will always be err and // give us an index to insert. if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) { @@ -847,7 +842,7 @@ impl Pallet { })?; let remaining_member_ids_sorted = - Self::members().into_iter().map(|x| x.who).collect::>(); + Members::::get().into_iter().map(|x| x.who).collect::>(); let outgoing = &[who.clone()]; let maybe_current_prime = T::ChangeMembers::get_prime(); let return_value = match maybe_replacement { @@ -884,7 +879,7 @@ impl Pallet { /// Check if `who` is a candidate. It returns the insert index if the element does not exists as /// an error. fn is_candidate(who: &T::AccountId) -> Result<(), usize> { - Self::candidates().binary_search_by(|c| c.0.cmp(who)).map(|_| ()) + Candidates::::get().binary_search_by(|c| c.0.cmp(who)).map(|_| ()) } /// Check if `who` is a voter. It may or may not be a _current_ one. @@ -894,17 +889,17 @@ impl Pallet { /// Check if `who` is currently an active member. fn is_member(who: &T::AccountId) -> bool { - Self::members().binary_search_by(|m| m.who.cmp(who)).is_ok() + Members::::get().binary_search_by(|m| m.who.cmp(who)).is_ok() } /// Check if `who` is currently an active runner-up. fn is_runner_up(who: &T::AccountId) -> bool { - Self::runners_up().iter().any(|r| &r.who == who) + RunnersUp::::get().iter().any(|r| &r.who == who) } /// Get the members' account ids. 
pub(crate) fn members_ids() -> Vec { - Self::members().into_iter().map(|m| m.who).collect::>() + Members::::get().into_iter().map(|m| m.who).collect::>() } /// Get a concatenation of previous members and runners-up and their deposits. @@ -912,10 +907,10 @@ impl Pallet { /// These accounts are essentially treated as candidates. fn implicit_candidates_with_deposit() -> Vec<(T::AccountId, BalanceOf)> { // invariant: these two are always without duplicates. - Self::members() + Members::::get() .into_iter() .map(|m| (m.who, m.deposit)) - .chain(Self::runners_up().into_iter().map(|r| (r.who, r.deposit))) + .chain(RunnersUp::::get().into_iter().map(|r| (r.who, r.deposit))) .collect::>() } @@ -932,7 +927,7 @@ impl Pallet { /// Remove a certain someone as a voter. fn do_remove_voter(who: &T::AccountId) { - let Voter { deposit, .. } = >::take(who); + let Voter { deposit, .. } = Voting::::take(who); // remove storage, lock and unreserve. T::Currency::remove_lock(T::PalletId::get(), who); @@ -952,7 +947,7 @@ impl Pallet { let desired_runners_up = T::DesiredRunnersUp::get() as usize; let num_to_elect = desired_runners_up + desired_seats; - let mut candidates_and_deposit = Self::candidates(); + let mut candidates_and_deposit = Candidates::::get(); // add all the previous members and runners-up as candidates as well. candidates_and_deposit.append(&mut Self::implicit_candidates_with_deposit()); @@ -1011,12 +1006,12 @@ impl Pallet { sp_npos_elections::seq_phragmen(num_to_elect, candidate_ids, voters_and_votes, None) .map(|ElectionResult:: { winners, assignments: _ }| { // this is already sorted by id. - let old_members_ids_sorted = >::take() + let old_members_ids_sorted = Members::::take() .into_iter() .map(|m| m.who) .collect::>(); // this one needs a sort by id. - let mut old_runners_up_ids_sorted = >::take() + let mut old_runners_up_ids_sorted = RunnersUp::::take() .into_iter() .map(|r| r.who) .collect::>(); @@ -1122,7 +1117,7 @@ impl Pallet { // fetch deposits from the one recorded one. This will make sure that a // candidate who submitted candidacy before a change to candidacy deposit will // have the correct amount recorded. - >::put( + Members::::put( new_members_sorted_by_id .iter() .map(|(who, stake)| SeatHolder { @@ -1132,7 +1127,7 @@ impl Pallet { }) .collect::>(), ); - >::put( + RunnersUp::::put( new_runners_up_sorted_by_rank .into_iter() .map(|(who, stake)| SeatHolder { @@ -1144,10 +1139,10 @@ impl Pallet { ); // clean candidates. 
- >::kill(); + Candidates::::kill(); Self::deposit_event(Event::NewTerm { new_members: new_members_sorted_by_id }); - >::mutate(|v| *v += 1); + ElectionRounds::::mutate(|v| *v += 1); }) .map_err(|e| { log::error!(target: LOG_TARGET, "Failed to run election [{:?}].", e,); @@ -1294,11 +1289,11 @@ impl Pallet { } fn candidates_ids() -> Vec { - Pallet::::candidates().iter().map(|(x, _)| x).cloned().collect::>() + Candidates::::get().iter().map(|(x, _)| x).cloned().collect::>() } fn runners_up_ids() -> Vec { - Pallet::::runners_up().into_iter().map(|r| r.who).collect::>() + RunnersUp::::get().into_iter().map(|r| r.who).collect::>() } } @@ -1310,7 +1305,7 @@ mod tests { assert_noop, assert_ok, derive_impl, dispatch::DispatchResultWithPostInfo, parameter_types, - traits::{ConstU32, ConstU64, OnInitialize}, + traits::{ConstU32, OnInitialize}, }; use frame_system::ensure_signed; use sp_runtime::{testing::Header, BuildStorage}; @@ -1322,20 +1317,9 @@ mod tests { type AccountData = pallet_balances::AccountData; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = frame_system::Pallet; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } frame_support::parameter_types! { @@ -1511,22 +1495,22 @@ mod tests { } fn candidate_ids() -> Vec { - Elections::candidates().into_iter().map(|(c, _)| c).collect::>() + Candidates::::get().into_iter().map(|(c, _)| c).collect::>() } fn candidate_deposit(who: &u64) -> u64 { - Elections::candidates() + Candidates::::get() .into_iter() .find_map(|(c, d)| if c == *who { Some(d) } else { None }) .unwrap_or_default() } fn voter_deposit(who: &u64) -> u64 { - Elections::voting(who).deposit + Voting::::get(who).deposit } fn runners_up_ids() -> Vec { - Elections::runners_up().into_iter().map(|r| r.who).collect::>() + RunnersUp::::get().into_iter().map(|r| r.who).collect::>() } fn members_ids() -> Vec { @@ -1534,11 +1518,14 @@ mod tests { } fn members_and_stake() -> Vec<(u64, u64)> { - Elections::members().into_iter().map(|m| (m.who, m.stake)).collect::>() + elections_phragmen::Members::::get() + .into_iter() + .map(|m| (m.who, m.stake)) + .collect::>() } fn runners_up_and_stake() -> Vec<(u64, u64)> { - Elections::runners_up() + RunnersUp::::get() .into_iter() .map(|r| (r.who, r.stake)) .collect::>() @@ -1573,7 +1560,7 @@ mod tests { } fn submit_candidacy(origin: RuntimeOrigin) -> sp_runtime::DispatchResult { - Elections::submit_candidacy(origin, Elections::candidates().len() as u32) + Elections::submit_candidacy(origin, Candidates::::get().len() as u32) } fn vote(origin: RuntimeOrigin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { @@ -1597,13 +1584,13 @@ mod tests { assert_eq!(::VotingBondFactor::get(), 0); assert_eq!(::CandidacyBond::get(), 3); assert_eq!(::TermDuration::get(), 5); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); - assert!(Elections::members().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(elections_phragmen::Members::::get().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); - assert_eq!(>::decode_len(), None); + assert_eq!(Candidates::::decode_len(), None); 
assert!(Elections::is_candidate(&1).is_err()); assert!(all_voters().is_empty()); @@ -1618,7 +1605,7 @@ mod tests { .build_and_execute(|| { System::set_block_number(1); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 1, stake: 10, deposit: 0 }, SeatHolder { who: 2, stake: 20, deposit: 0 } @@ -1626,11 +1613,11 @@ mod tests { ); assert_eq!( - Elections::voting(1), + Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); @@ -1650,19 +1637,19 @@ mod tests { System::set_block_number(1); assert_eq!( - Elections::voting(1), + Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(1))); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); - assert_eq!(Elections::voting(1), Default::default()); - assert_eq!(Elections::voting(2), Default::default()); + assert_eq!(Voting::::get(1), Default::default()); + assert_eq!(Voting::::get(2), Default::default()); }) } @@ -1673,7 +1660,7 @@ mod tests { .build_and_execute(|| { System::set_block_number(1); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 1, stake: 10, deposit: 0 }, SeatHolder { who: 2, stake: 20, deposit: 0 }, @@ -1681,11 +1668,11 @@ mod tests { ); assert_eq!( - Elections::voting(1), + Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); @@ -1729,17 +1716,17 @@ mod tests { ExtBuilder::default().term_duration(0).build_and_execute(|| { assert_eq!(::TermDuration::get(), 0); assert_eq!(::DesiredMembers::get(), 2); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); assert!(members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert!(members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); }); } @@ -1780,14 +1767,14 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_eq!(Elections::candidates(), vec![(5, 3)]); + assert_eq!(Candidates::::get(), vec![(5, 3)]); // a runtime upgrade changes the bond. CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); + assert_eq!(Candidates::::get(), vec![(4, 4), (5, 3)]); // once elected, they each hold their candidacy bond, no more. 
System::set_block_number(5); @@ -1796,7 +1783,7 @@ mod tests { assert_eq!(balances(&4), (34, 6)); assert_eq!(balances(&5), (45, 5)); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 4, stake: 34, deposit: 4 }, SeatHolder { who: 5, stake: 45, deposit: 3 }, @@ -1845,7 +1832,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![5]); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); assert_noop!(submit_candidacy(RuntimeOrigin::signed(5)), Error::::MemberSubmit); @@ -1971,7 +1958,7 @@ mod tests { // 2 + 1 assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(Voting::::get(&2).deposit, 3); assert_eq!(has_lock(&2), 10); assert_eq!(locked_stake_of(&2), 10); @@ -1979,7 +1966,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); - assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(Voting::::get(&2).deposit, 4); assert_eq!(has_lock(&2), 15); assert_eq!(locked_stake_of(&2), 15); @@ -1987,7 +1974,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 3], 18)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); - assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(Voting::::get(&2).deposit, 4); assert_eq!(has_lock(&2), 16); assert_eq!(locked_stake_of(&2), 16); @@ -1995,7 +1982,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 12)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(Voting::::get(&2).deposit, 3); assert_eq!(has_lock(&2), 12); assert_eq!(locked_stake_of(&2), 12); }); @@ -2273,9 +2260,9 @@ mod tests { assert_eq!(votes_of(&4), vec![4]); assert_eq!(candidate_ids(), vec![3, 4, 5]); - assert_eq!(>::decode_len().unwrap(), 3); + assert_eq!(Candidates::::decode_len().unwrap(), 3); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2284,13 +2271,13 @@ mod tests { // votes for 5 assert_eq!(balances(&2), (18, 2)); assert_eq!(members_and_stake(), vec![(3, 25), (5, 18)]); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert_eq_uvec!(all_voters(), vec![2, 3, 4]); assert!(candidate_ids().is_empty()); - assert_eq!(>::decode_len(), None); + assert_eq!(Candidates::::decode_len(), None); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); }); } @@ -2353,7 +2340,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_and_stake(), vec![(5, 45)]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); // but now it has a valid target. assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); @@ -2363,7 +2350,7 @@ mod tests { // candidate 4 is affected by an old vote. 
assert_eq!(members_and_stake(), vec![(4, 28), (5, 45)]); - assert_eq!(Elections::election_rounds(), 2); + assert_eq!(ElectionRounds::::get(), 2); assert_eq_uvec!(all_voters(), vec![3, 5]); }); } @@ -2384,7 +2371,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2399,7 +2386,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert!(candidate_ids().is_empty()); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert!(members_ids().is_empty()); System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { @@ -2553,7 +2540,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); @@ -2597,7 +2584,7 @@ mod tests { assert_eq!(runners_up_and_stake(), vec![(2, 15), (3, 25)]); // no new candidates but old members and runners-up are always added. assert!(candidate_ids().is_empty()); - assert_eq!(Elections::election_rounds(), b / 5); + assert_eq!(ElectionRounds::::get(), b / 5); assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); }; @@ -2621,7 +2608,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); // a new candidate assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); @@ -2630,7 +2617,7 @@ mod tests { assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed - assert_eq!(Elections::election_rounds(), 2); // new election round + assert_eq!(ElectionRounds::::get(), 2); // new election round assert_eq!(members_ids(), vec![3, 5]); // new members }); } @@ -2647,14 +2634,14 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_eq!(>::decode_len().unwrap(), 3); + assert_eq!(Candidates::::decode_len().unwrap(), 3); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![3, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(3))); @@ -2665,7 +2652,7 @@ mod tests { System::set_block_number(10); Elections::on_initialize(System::block_number()); assert!(members_ids().is_empty()); - assert_eq!(Elections::election_rounds(), 2); + assert_eq!(ElectionRounds::::get(), 2); }); } @@ -2730,7 +2717,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq_uvec!(members_ids(), vec![3, 4]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); }); } @@ -3175,13 +3162,13 @@ mod tests { .desired_members(0) .desired_runners_up(0) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3193,7 +3180,7 @@ mod tests { assert_eq!(members_ids().len(), 0); assert_eq!(runners_up_ids().len(), 0); assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); // not interested in members @@ -3201,13 +3188,13 @@ mod tests { .desired_members(0) .desired_runners_up(2) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3219,7 +3206,7 @@ mod tests { assert_eq!(members_ids().len(), 0); assert_eq!(runners_up_ids(), vec![3, 4]); assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); // not interested in runners-up @@ -3227,13 +3214,13 @@ mod tests { .desired_members(2) .desired_runners_up(0) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3245,7 +3232,7 @@ mod tests { assert_eq!(members_ids(), vec![3, 4]); assert_eq!(runners_up_ids().len(), 0); assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); } diff --git a/substrate/frame/elections-phragmen/src/migrations/v3.rs b/substrate/frame/elections-phragmen/src/migrations/v3.rs index cdca1138ebbd2f9de20ebcfaf9e39fb313850347..b0092d6c07bb647f077fa336201e48f575ae9b9b 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v3.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v3.rs @@ -116,16 +116,16 @@ pub fn apply( /// Migrate from the old legacy voting bond (fixed) to the new one (per-vote dynamic). pub fn migrate_voters_to_recorded_deposit(old_deposit: V::Balance) { - >::translate::<(V::Balance, Vec), _>(|_who, (stake, votes)| { + Voting::::translate::<(V::Balance, Vec), _>(|_who, (stake, votes)| { Some(Voter { votes, stake, deposit: old_deposit }) }); - log::info!(target: LOG_TARGET, "migrated {} voter accounts.", >::iter().count()); + log::info!(target: LOG_TARGET, "migrated {} voter accounts.", Voting::::iter().count()); } /// Migrate all candidates to recorded deposit. 
pub fn migrate_candidates_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>(|maybe_old_candidates| { + let _ = Candidates::::translate::, _>(|maybe_old_candidates| { maybe_old_candidates.map(|old_candidates| { log::info!(target: LOG_TARGET, "migrated {} candidate accounts.", old_candidates.len()); old_candidates.into_iter().map(|c| (c, old_deposit)).collect::>() @@ -135,7 +135,7 @@ pub fn migrate_candidates_to_recorded_deposit(old_deposit: /// Migrate all members to recorded deposit. pub fn migrate_members_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>(|maybe_old_members| { + let _ = Members::::translate::, _>(|maybe_old_members| { maybe_old_members.map(|old_members| { log::info!(target: LOG_TARGET, "migrated {} member accounts.", old_members.len()); old_members @@ -148,7 +148,7 @@ pub fn migrate_members_to_recorded_deposit(old_deposit: V: /// Migrate all runners-up to recorded deposit. pub fn migrate_runners_up_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>( + let _ = RunnersUp::::translate::, _>( |maybe_old_runners_up| { maybe_old_runners_up.map(|old_runners_up| { log::info!( diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 45c7440eb89135eca98cf00a4aaf5d6ad2c094ca..0bb42517eb46748e53c79873b41117d3795f3bd2 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -16,15 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-default-config-example = { path = "default-config", default-features = false } -pallet-dev-mode = { path = "dev-mode", default-features = false } -pallet-example-basic = { path = "basic", default-features = false } -pallet-example-frame-crate = { path = "frame-crate", default-features = false } -pallet-example-kitchensink = { path = "kitchensink", default-features = false } -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } -pallet-example-split = { path = "split", default-features = false } -pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } -pallet-example-tasks = { path = "tasks", default-features = false } +pallet-default-config-example = { workspace = true } +pallet-dev-mode = { workspace = true } +pallet-example-basic = { workspace = true } +pallet-example-frame-crate = { workspace = true } +pallet-example-kitchensink = { workspace = true } +pallet-example-offchain-worker = { workspace = true } +pallet-example-split = { workspace = true } +pallet-example-single-block-migrations = { workspace = true } +pallet-example-tasks = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index ba9f9eca27d79c854dd67a8d81130062f3d1f70d..b34d9efc15b7fef9285eb7ecf741f677d47e472a 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet" readme = "README.md" +publish = false [lints] workspace = true @@ -16,19 +17,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path 
= "../../benchmarking", default-features = false, optional = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index d351b27eecde3c506e17339f9f7300d522fcc87f..505cd6f906de23bb5045834125201cc51c613217 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -71,20 +71,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 0ad5b56cb6faa473d337f54fe7eaedaf31a70d9d..8a0d14edbdbf386c64a675ad33885de632d753a3 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet demonstrating derive_impl / default_config in action" readme = "README.md" +publish = false [lints] workspace = true @@ -16,15 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 
d7570f570946f1df05665ad8804e62ad9c18030a..1dd033d7e071e29027109f624cbc613f542e9575 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -16,18 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index e8a18ec13fe912497e32a821e4f10d75582609e1..637864b87bc43d9e4e55cd9fe4f054cf5efd1ca8 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-dev-mode. use crate::*; -use frame_support::{assert_ok, derive_impl, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } impl Config for Test { diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 29984bab3e0ffc5ac881eb086d91820c01e7d504..e5137526026e6f69d11408541686039a9f804c62 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] } +frame = { features = ["experimental", "runtime"], workspace = true } [features] diff --git a/substrate/frame/examples/kitchensink/Cargo.toml 
b/substrate/frame/examples/kitchensink/Cargo.toml index db3e22daa01bd044cae876aabeac25543e814dcb..b3869dff94142de172340ce7f02b701389d1b5a5 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -16,23 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false, features = ["experimental"] } -frame-system = { path = "../../system", default-features = false } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } -pallet-balances = { path = "../../balances", default-features = false } +pallet-balances = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/kitchensink/src/tests.rs b/substrate/frame/examples/kitchensink/src/tests.rs index 1205fefc422983d2b6f621b601a96b786353f83a..7cf95497bf06408b0aae5543513a81a3316ddeaa 100644 --- a/substrate/frame/examples/kitchensink/src/tests.rs +++ b/substrate/frame/examples/kitchensink/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-example-kitchensink. use crate::*; -use frame_support::{assert_ok, derive_impl, parameter_types, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::VariantCountOf}; use sp_runtime::BuildStorage; // Reexport crate as its pallet name for construct_runtime. use crate as pallet_example_kitchensink; @@ -43,20 +43,14 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; } parameter_types! 
{ diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml index 61bb2bc61b4e3758fc47c413f35ac5420bbfb4ef..91d0a71bb34153eacaff87356336ebcf10db3674 100644 --- a/substrate/frame/examples/multi-block-migrations/Cargo.toml +++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml @@ -13,14 +13,14 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -pallet-migrations = { path = "../../migrations", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } +codec = { workspace = true } +pallet-migrations = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } +scale-info = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index 23ce79c34402da91cea1f1e02ae7ebd90dc6e5d3..f8ccd1f04e47534857894a6a5a1c497d48eb446b 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet for offchain worker" readme = "README.md" +publish = false [lints] workspace = true @@ -16,17 +17,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -lite-json = { version = "0.2.0", default-features = false } +codec = { workspace = true } +lite-json = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-keystore = { path = "../../../primitives/keystore", optional = true, default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 080500f629671837316e724b6825493c3fcb4b67..a6cf020ce535d96cb81b9f93955bbbf795a62e34 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ 
b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,20 +13,20 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = "0.2.8" -log = { version = "0.4.21", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-executive = { path = "../../executive", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } -pallet-balances = { path = "../../balances", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false } +docify = { workspace = true } +log = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-executive = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-balances = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-version = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs index 68594cc4ad727a18f97b5d8b55381d36d1e7aeed..f4cf81ea6474f2971c08b1174e276fe0b7b9a0f0 100644 --- a/substrate/frame/examples/single-block-migrations/src/mock.rs +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -18,7 +18,7 @@ #![cfg(any(all(feature = "try-runtime", test), doc))] use crate::*; -use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; +use frame_support::{derive_impl, weights::constants::ParityDbWeight}; // Re-export crate as its pallet name for construct_runtime. 
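A recurring change in the mock files above and below is replacing hand-written `pallet_balances::Config` test impls with `#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]`, so the test runtime only spells out the associated types it actually overrides. A minimal sketch of that pattern, assuming a hypothetical `Test` runtime (the runtime name and the `new_test_ext` helper are illustrative, not part of this diff):

```rust
// Sketch: a test runtime that takes its frame_system and pallet_balances
// configuration from the crates' test config preludes and overrides only
// what the tests need.
use frame_support::derive_impl;
use sp_runtime::BuildStorage;

frame_support::construct_runtime!(
    pub enum Test {
        System: frame_system,
        Balances: pallet_balances,
    }
);

#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    // `Block` has no default, and pallet-balances needs its account data hooked in.
    type Block = frame_system::mocking::MockBlock<Test>;
    type AccountData = pallet_balances::AccountData<u64>;
}

#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    // Everything else (Balance, ExistentialDeposit, freeze/hold reasons, ...)
    // falls back to the prelude's defaults, as in the mocks in this diff.
    type AccountStore = System;
}

/// Build externalities for tests of this sketch runtime.
pub fn new_test_ext() -> sp_io::TestExternalities {
    frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
}
```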
use crate as pallet_example_storage_migration; @@ -41,20 +41,9 @@ impl frame_system::Config for MockRuntime { type DbWeight = ParityDbWeight; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for MockRuntime { - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl Config for MockRuntime {} diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index 6cb4d7ddd6c06951b749d796746a7f5a63ee343f..d8a8c6869ab5f78b16a793e544face8866d24b8d 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example split pallet" readme = "README.md" +publish = false [lints] workspace = true @@ -16,20 +17,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } +sp-std = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 95246ef3f6643aafe215bb4bacf0e2e2598149ae..1ca62fd89943ec729af9a2c059b1b434fe274ce8 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true repository.workspace = true description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" +publish = false [lints] workspace = true @@ -14,19 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = 
false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-core = { default-features = false, path = "../../../primitives/core" } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 4cce0fa9f9504ae48ca1929f87311896b81ca5cf..0f9741eec180d172c24434d587d723735551148c 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -16,29 +16,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -aquamarine = "0.5.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +aquamarine = { workspace = true } +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-tracing = { path = "../../primitives/tracing", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -pallet-balances = { path = "../balances" } -pallet-transaction-payment = { path = "../transaction-payment" } -sp-core = { path = "../../primitives/core" } -sp-inherents = { path = "../../primitives/inherents" } -sp-io = { path = "../../primitives/io" } -sp-version = { path = "../../primitives/version" } +array-bytes = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index e3721f7b6dcbac24be073515d78fbf0f1d2658b8..69a970a89d93070fec01a3b80867e1ff529d014c 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -36,7 +36,7 @@ use frame_support::{ migrations::MultiStepMigrator, pallet_prelude::*, parameter_types, - traits::{fungible, ConstU8, Currency, IsInherent}, + traits::{fungible, ConstU8, Currency, IsInherent, VariantCount, VariantCountOf}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, }; use 
frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; @@ -325,12 +325,24 @@ impl frame_system::Config for Runtime { type MultiBlockMigrator = MockedModeGetter; } +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)] +pub enum FreezeReasonId { + Foo, +} + +impl VariantCount for FreezeReasonId { + const VARIANT_COUNT: u32 = 1; +} + type Balance = u64; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type RuntimeFreezeReason = FreezeReasonId; + type FreezeIdentifier = FreezeReasonId; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -637,8 +649,8 @@ fn block_weight_limit_enforced() { assert!(res.is_ok()); assert_eq!( >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + //--------------------- on_initialize + block_execution + extrinsic_base weight + extrinsic len + Weight::from_parts((encoded_len + 5) * (nonce + 1), (nonce + 1)* encoded_len) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -686,9 +698,10 @@ fn block_weight_and_size_is_stored_per_tx() { ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; + // Check we account for all extrinsic weight and their len. assert_eq!( >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, + base_block_weight + 3u64 * extrinsic_weight + 3u64 * Weight::from_parts(0, len as u64), ); assert_eq!(>::all_extrinsics_len(), 3 * len); @@ -743,8 +756,12 @@ fn validate_unsigned() { fn can_not_pay_for_tx_fee_on_full_lock() { let mut t = new_test_ext(1); t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze(&(), &1, 110) - .unwrap(); + as fungible::MutateFreeze>::set_freeze( + &FreezeReasonId::Foo, + &1, + 110, + ) + .unwrap(); let xt = TestXt::new( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 5b7121e2eae3788fdf37c0467871f0233dd62393..59676ad39951951c6046bb6de303ee7ec9c0b21f 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -15,31 +15,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-staking = { workspace = true } +frame-election-provider-support = { workspace = true } 
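The `executive` test changes just above pair a custom freeze-reason enum with `VariantCountOf` so that `MaxFreezes` always matches the number of declared reasons. A small sketch of that wiring in isolation (the enum name and its variants are illustrative, not the diff's `FreezeReasonId`):

```rust
// Sketch: tie the balances `MaxFreezes` bound to the variant count of a
// freeze-reason enum via `VariantCount` / `VariantCountOf`.
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::traits::{VariantCount, VariantCountOf};
use scale_info::TypeInfo;
use sp_runtime::RuntimeDebug;

#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)]
pub enum FreezeReason {
    Staking,
    Voting,
}

// Manually declare how many variants exist, as the diff does for its
// single-variant `FreezeReasonId`.
impl VariantCount for FreezeReason {
    const VARIANT_COUNT: u32 = 2;
}

// `VariantCountOf<FreezeReason>` is a `Get<u32>` returning `VARIANT_COUNT`,
// so the limit stays in sync with the enum when used as `MaxFreezes`.
type MaxFreezes = VariantCountOf<FreezeReason>;
```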
-frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core", default-features = false } -substrate-test-utils = { path = "../../test-utils" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-staking = { path = "../staking" } -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true } +substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 9238a085141df2a4230f5b96aa6ec80426197a46..7ce7fee1410782a2166f7e696f2f0cad71746b40 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -60,20 +60,11 @@ parameter_types! { pub static ExistentialDeposit: Balance = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { @@ -113,35 +104,17 @@ impl frame_election_provider_support::ElectionProvider for MockElection { } } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 730c4e70935c0be3cb840087011a37ca9668b317..58faecde62939f50d6b901ec9a5f3fa32ee8eda6 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -16,20 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2 = { version = "0.10.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +blake2 = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-inherents = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,6 +44,7 @@ std = [ "pallet-balances/std", "scale-info/std", "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/substrate/frame/glutton/README.md b/substrate/frame/glutton/README.md index 89dbe26ec7a9d7eb0ad379f0526a770b4ba45a28..43642df19104a709bdab08661848774ff955fb48 100644 --- a/substrate/frame/glutton/README.md +++ b/substrate/frame/glutton/README.md @@ -7,6 +7,7 @@ The `Glutton` pallet gets the name from its property to consume vast amounts of resources. It can be used to push para-chains and their relay-chains to the limits. This is good for testing out theoretical limits in a practical way. -The `Glutton` can be set to consume a fraction of the available unused weight of a chain. It accomplishes this by -utilizing the `on_idle` hook and consuming a specific ration of the remaining weight. The rations can be set via -`set_compute` and `set_storage`. Initially the `Glutton` needs to be initialized once with `initialize_pallet`. +The `Glutton` can be set to consume a fraction of the available block length and unused weight of a chain. It +accomplishes this by filling the block length up to a ration and utilizing the `on_idle` hook to consume a +specific ration of the remaining weight. The rations can be set via `set_compute`, `set_storage` and `set_block_length`. +Initially the `Glutton` needs to be initialized once with `initialize_pallet`. diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index 344a70becaeb9eb35cb17c640977f4c8401684ab..5427173b486bab7dfbc830d6af130bf8bccf7e6a 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -89,6 +89,11 @@ pub mod pallet { /// The storage limit. 
storage: FixedU64, }, + /// The block length limit has been updated. + BlockLengthLimitSet { + /// The block length limit. + block_length: FixedU64, + }, } #[pallet::error] @@ -116,6 +121,13 @@ pub mod pallet { #[pallet::storage] pub(crate) type Storage = StorageValue<_, FixedU64, ValueQuery>; + /// The proportion of the `block length` to consume on each block. + /// + /// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to + /// over `1.0` could stall the chain. + #[pallet::storage] + pub(crate) type Length = StorageValue<_, FixedU64, ValueQuery>; + /// Storage map used for wasting proof size. /// /// It contains no meaningful data - hence the name "Trash". The maximal number of entries is @@ -146,6 +158,8 @@ pub mod pallet { pub storage: FixedU64, /// The amount of trash data for wasting proof size. pub trash_data_count: u32, + /// The block length limit. + pub block_length: FixedU64, #[serde(skip)] /// The required configuration field. pub _config: sp_std::marker::PhantomData, @@ -170,6 +184,9 @@ pub mod pallet { assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane"); >::put(self.storage); + + assert!(self.block_length <= RESOURCE_HARD_LIMIT, "Block length limit is insane"); + >::put(self.block_length); } } @@ -208,6 +225,40 @@ pub mod pallet { } } + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: InherentIdentifier = *b"bloated0"; + + fn create_inherent(_data: &InherentData) -> Option { + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Mandatory); + let bloat_size = Length::::get().saturating_mul_int(max_block_length) as usize; + let amount_trash = bloat_size / VALUE_SIZE; + let garbage = TrashData::::iter() + .map(|(_k, v)| v) + .collect::>() + .into_iter() + .cycle() + .take(amount_trash) + .collect::>(); + + Some(Call::bloat { garbage }) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::bloat { .. }) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + Call::bloat { .. } => Ok(()), + _ => unreachable!("other calls are not inherents"), + } + } + } + #[pallet::call(weight = T::WeightInfo)] impl Pallet { /// Initialize the pallet. Should be called once, if no genesis state was provided. @@ -277,6 +328,31 @@ pub mod pallet { Self::deposit_event(Event::StorageLimitSet { storage }); Ok(()) } + + /// Increase the block size by including the specified garbage bytes. + #[pallet::call_index(3)] + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn bloat(_origin: OriginFor, _garbage: Vec<[u8; VALUE_SIZE]>) -> DispatchResult { + Ok(()) + } + + /// Set how much of the block length should be filled with trash data on each block. + /// + /// `1.0` means that all block should be filled. If set to `1.0`, storage proof size will + /// be close to zero. + /// + /// Only callable by Root or `AdminOrigin`. 
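To make the new inherent's sizing concrete: it scales the maximal (mandatory-class) block length by the configured `Length` ratio, then divides by the trash value size to get the number of `[u8; VALUE_SIZE]` entries to include. A rough standalone sketch of that arithmetic, where the concrete `VALUE_SIZE` and block length are assumptions for illustration:

```rust
// Sketch of how the glutton `bloat` inherent sizes its garbage payload.
use sp_runtime::{FixedPointNumber, FixedU64};

/// Bytes per trash value; mirrors the pallet's `VALUE_SIZE` constant
/// (the concrete number here is an assumption for illustration).
const VALUE_SIZE: usize = 1024;

/// Number of `[u8; VALUE_SIZE]` entries needed to fill `length_ratio` of the
/// maximal block length.
fn trash_entries(length_ratio: FixedU64, max_block_length: u32) -> usize {
    // Bytes to bloat: the configured ratio of the mandatory-class block length.
    let bloat_size = length_ratio.saturating_mul_int(max_block_length) as usize;
    bloat_size / VALUE_SIZE
}

fn main() {
    // E.g. fill 30% of an assumed 5 MiB block length.
    let n = trash_entries(FixedU64::from_float(0.3), 5 * 1024 * 1024);
    println!("the inherent would include {n} trash values");
}
```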
+ #[pallet::call_index(4)] + #[pallet::weight({1})] + pub fn set_block_length(origin: OriginFor, block_length: FixedU64) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + + ensure!(block_length <= RESOURCE_HARD_LIMIT, Error::::InsaneLimit); + Length::::set(block_length); + + Self::deposit_event(Event::BlockLengthLimitSet { block_length }); + Ok(()) + } } impl Pallet { diff --git a/substrate/frame/glutton/src/mock.rs b/substrate/frame/glutton/src/mock.rs index 132ef5cfbcbbabb077b7968cc0ae1451bcd06bc7..7163d7c46781f331222ee6dbbc9cf4807b2ecb1e 100644 --- a/substrate/frame/glutton/src/mock.rs +++ b/substrate/frame/glutton/src/mock.rs @@ -50,10 +50,14 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Set the `compute` and `storage` limits. +/// Set the `compute`, `storage` and `block_length` limits. /// /// `1.0` corresponds to `100%`. -pub fn set_limits(compute: f64, storage: f64) { +pub fn set_limits(compute: f64, storage: f64, block_length: f64) { assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(compute))); assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(storage))); + assert_ok!(Glutton::set_block_length( + RuntimeOrigin::root(), + FixedU64::from_float(block_length) + )); } diff --git a/substrate/frame/glutton/src/tests.rs b/substrate/frame/glutton/src/tests.rs index b72d5272772540e5106af8cec248170cfcf910d6..81d228f39a9363961dd7e64cc952414d07ab1a33 100644 --- a/substrate/frame/glutton/src/tests.rs +++ b/substrate/frame/glutton/src/tests.rs @@ -123,6 +123,43 @@ fn setting_compute_respects_limit() { }); } +#[test] +fn setting_block_length_works() { + new_test_ext().execute_with(|| { + assert_eq!(Compute::::get(), Zero::zero()); + + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(0.3))); + assert_eq!(Length::::get(), FixedU64::from_float(0.3)); + System::assert_last_event( + Event::BlockLengthLimitSet { block_length: FixedU64::from_float(0.3) }.into(), + ); + + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::signed(1), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::none(), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn setting_block_length_respects_limit() { + new_test_ext().execute_with(|| { + // < 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(9.99)),); + // == 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_u32(10)),); + // > 1000% is not + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(10.01)), + Error::::InsaneLimit + ); + }); +} + #[test] fn setting_storage_works() { new_test_ext().execute_with(|| { @@ -163,7 +200,7 @@ fn setting_storage_respects_limit() { #[test] fn on_idle_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); Glutton::on_idle(1, Weight::from_parts(20_000_000, 0)); }); @@ -173,7 +210,7 @@ fn on_idle_works() { #[test] fn on_idle_weight_high_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_MB * 5); let got = Glutton::on_idle(1, should); @@ -196,7 +233,7 @@ fn on_idle_weight_high_proof_is_close_enough_works() { #[test] fn 
on_idle_weight_low_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_KB * 20); let got = Glutton::on_idle(1, should); @@ -224,7 +261,7 @@ fn on_idle_weight_over_unity_is_close_enough_works() { let max_block = Weight::from_parts(500 * WEIGHT_REF_TIME_PER_MILLIS, 5 * WEIGHT_PROOF_SIZE_PER_MB); // But now we tell it to consume more than that. - set_limits(1.75, 1.5); + set_limits(1.75, 1.5, 0.0); let want = Weight::from_parts( (1.75 * max_block.ref_time() as f64) as u64, (1.5 * max_block.proof_size() as f64) as u64, diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 37048b06608f613f0e0c724314b7fb2093dec619..e08af3a5e91b31a00b6fd8d6f5a9f7764e7c8993 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -16,33 +16,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -frame-benchmarking = { path = "../benchmarking" } -frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = 
"../staking/reward-curve" } -pallet-timestamp = { path = "../timestamp" } -sp-keyring = { path = "../../primitives/keyring" } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 38b5536bc598b33520575b038dd220e962bd7215..5ba7da7f9fda2a6e0d30f1b04b7aac1e808c55cb 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -108,20 +108,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -159,35 +150,22 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 987e418048d3629b0f2c042d898fe7e2d925a22c..d0b796cd75d351ba2e5df0d791d932babad11c08 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = 
["derive", "max-encoded-len"] } -enumflags2 = { version = "0.7.7" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +enumflags2 = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index b1a953d487ce29bdd27360d61e22d163656ad7db..09edd5de79bb2ff9ba483dbd03c6b24ad59a28de 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -61,20 +61,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 78192a81d7b461d0c834662b8bd0e4c50ea31333..85cbcb3941b17b8675049196d8d60ad201e40916 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-session = { path = "../session" } +pallet-session = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index 248bae003ed856a828ce82863ad2fadfb7f0a927..20c3863ff99b0743100c9d3354a9e5efb43ffc71 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-keyring = { path = "../../primitives/keyring", optional = true, default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { 
workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { optional = true, workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 7a8ff98f6d4ae507f1af99a713390528c8a9d0f0..72bbc6dab4a42b7c3556aa986a770ff3fbf24bdc 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -42,20 +42,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index c2ec14cb4bc77c5668750684ae983b41ac46e780..0da0d5373953b7703c095e86a9dbf2ec09fba2a6 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +safe-mix = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/insecure-randomness-collective-flip/README.md b/substrate/frame/insecure-randomness-collective-flip/README.md index 4f02782fa65910068ea9d0345b06156d06f8ca57..fc38367bf55202e2a4af52f2ed1083a3e1606441 100644 --- a/substrate/frame/insecure-randomness-collective-flip/README.md +++ b/substrate/frame/insecure-randomness-collective-flip/README.md @@ -44,7 +44,7 @@ pub mod pallet { impl Pallet { #[pallet::weight(0)] pub fn random_module_example(origin: OriginFor) -> DispatchResult { - let _random_value = >::random(&b"my context"[..]); + let _random_value = pallet_insecure_randomness_collective_flip::Pallet::::random(&b"my context"[..]); Ok(()) } } diff --git 
a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs index bdb089a14200c679b4ad9d5c5404fffbf8d19a8e..b605b4d08582be2613bad2662fdc4de30c5dfe4c 100644 --- a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs +++ b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs @@ -60,7 +60,7 @@ //! impl Pallet { //! #[pallet::weight(0)] //! pub fn random_module_example(origin: OriginFor) -> DispatchResult { -//! let _random_value = >::random(&b"my context"[..]); +//! let _random_value = pallet_insecure_randomness_collective_flip::Pallet::::random(&b"my context"[..]); //! Ok(()) //! } //! } @@ -101,9 +101,9 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(block_number: BlockNumberFor) -> Weight { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); - >::mutate(|ref mut values| { + RandomMaterial::::mutate(|ref mut values| { if values.try_push(parent_hash).is_err() { let index = block_number_to_index::(block_number); values[index] = parent_hash; @@ -118,9 +118,15 @@ pub mod pallet { /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of /// the oldest hash. #[pallet::storage] - #[pallet::getter(fn random_material)] - pub(super) type RandomMaterial = + pub type RandomMaterial = StorageValue<_, BoundedVec>, ValueQuery>; + + impl Pallet { + /// Gets the random material storage value + pub fn random_material() -> BoundedVec> { + RandomMaterial::::get() + } + } } impl Randomness> for Pallet { @@ -135,10 +141,10 @@ impl Randomness> for Pallet { /// and mean that all bits of the resulting value are entirely manipulatable by the author of /// the parent block, who can determine the value of `parent_hash`. fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { - let block_number = >::block_number(); + let block_number = frame_system::Pallet::::block_number(); let index = block_number_to_index::(block_number); - let hash_series = >::get(); + let hash_series = RandomMaterial::::get(); let seed = if !hash_series.is_empty() { // Always the case after block 1 is initialized. 
hash_series @@ -226,7 +232,7 @@ mod tests { setup_blocks(38); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 38); assert_eq!(random_material[0], genesis_hash); @@ -240,7 +246,7 @@ mod tests { setup_blocks(81); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 81); assert_ne!(random_material[0], random_material[1]); @@ -255,7 +261,7 @@ mod tests { setup_blocks(162); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 81); assert_ne!(random_material[0], random_material[1]); @@ -276,7 +282,7 @@ mod tests { assert_eq!(known_since, 162 - RANDOM_MATERIAL_LEN as u64); assert_ne!(random, H256::zero()); - assert!(!CollectiveFlip::random_material().contains(&random)); + assert!(!RandomMaterial::::get().contains(&random)); }); } } diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index be59e5ec8935102bd03782f7c745775c46bacc18..34d1728e42ed0df444530cd4cdd6f039f2e12da4 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -15,21 +15,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -frame-support-test = { path = "../support/test" } -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +frame-support-test = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index 596e1a9d837d16486e8a2017b0ab679a02bc2dd6..d2c442e2ac6e5acd0b096308ebed02ad9fe0f78e 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_lottery; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, + traits::{ConstU32, OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = 
u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 9f19c40973687a41936372f6e116eb2dc8417306..35b0eeaa7141985c31ab857931e300da050aafcd 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index aa6be6497eea697235f71c2927b1adf6044052bd..d5dad68e811b008a94fc4b1b1f67ff0e454ba8d2 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -24,7 +24,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::{ - traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers}, + traits::{ChangeMembers, Contains, ContainsLengthBound, Get, InitializeMembers, SortedMembers}, BoundedVec, }; use sp_runtime::traits::{StaticLookup, UniqueSaturatedInto}; @@ -95,13 +95,11 @@ pub mod pallet { /// The current membership, stored as an ordered Vec. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// The current prime member, if one exists. 
#[pallet::storage] - #[pallet::getter(fn prime)] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::genesis_config] @@ -126,7 +124,7 @@ pub mod pallet { let mut members = self.members.clone(); members.sort(); T::MembershipInitialized::initialize_members(&members); - >::put(members); + Members::::put(members); } } @@ -171,14 +169,14 @@ pub mod pallet { T::AddOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let mut members = >::get(); + let mut members = Members::::get(); let init_length = members.len(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; members .try_insert(location, who.clone()) .map_err(|_| Error::::TooManyMembers)?; - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); @@ -199,12 +197,12 @@ pub mod pallet { T::RemoveOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let mut members = >::get(); + let mut members = Members::::get(); let init_length = members.len(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); Self::rejig_prime(&members); @@ -233,13 +231,13 @@ pub mod pallet { return Ok(().into()); } - let mut members = >::get(); + let mut members = Members::::get(); let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; members[location] = add.clone(); members.sort(); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[add], &[remove], &members[..]); Self::rejig_prime(&members); @@ -260,7 +258,7 @@ pub mod pallet { let mut members: BoundedVec = BoundedVec::try_from(members).map_err(|_| Error::::TooManyMembers)?; members.sort(); - >::mutate(|m| { + Members::::mutate(|m| { T::MembershipChanged::set_members_sorted(&members[..], m); Self::rejig_prime(&members); *m = members; @@ -288,14 +286,14 @@ pub mod pallet { return Ok(().into()); } - let mut members = >::get(); + let mut members = Members::::get(); let members_length = members.len() as u32; let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; members[location] = new.clone(); members.sort(); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted( &[new.clone()], @@ -323,7 +321,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { T::PrimeOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let members = Self::members(); + let members = Members::::get(); members.binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); @@ -345,6 +343,16 @@ pub mod pallet { } impl, I: 'static> Pallet { + /// The current membership, stored as an ordered `Vec`. + pub fn members() -> BoundedVec { + Members::::get() + } + + /// The current prime member, if one exists. 
+ pub fn prime() -> Option { + Prime::::get() + } + fn rejig_prime(members: &[T::AccountId]) { if let Some(prime) = Prime::::get() { match members.binary_search(&prime) { @@ -357,13 +365,24 @@ impl, I: 'static> Pallet { impl, I: 'static> Contains for Pallet { fn contains(t: &T::AccountId) -> bool { - Self::members().binary_search(t).is_ok() + Members::::get().binary_search(t).is_ok() + } +} + +impl ContainsLengthBound for Pallet { + fn min_len() -> usize { + 0 + } + + /// Implementation uses a parameter type so calling is cost-free. + fn max_len() -> usize { + T::MaxMembers::get() as usize } } impl, I: 'static> SortedMembers for Pallet { fn sorted_members() -> Vec { - Self::members().to_vec() + Members::::get().to_vec() } fn count() -> usize { @@ -398,12 +417,12 @@ mod benchmark { let prime_origin = T::PrimeOrigin::try_successful_origin() .expect("PrimeOrigin has no successful origin required for the benchmark"); - assert_ok!(>::reset_members(reset_origin, members.clone())); + assert_ok!(Membership::::reset_members(reset_origin, members.clone())); if let Some(prime) = prime.map(|i| members[i].clone()) { let prime_lookup = T::Lookup::unlookup(prime); - assert_ok!(>::set_prime(prime_origin, prime_lookup)); + assert_ok!(Membership::::set_prime(prime_origin, prime_lookup)); } else { - assert_ok!(>::clear_prime(prime_origin)); + assert_ok!(Membership::::clear_prime(prime_origin)); } } @@ -416,12 +435,12 @@ mod benchmark { let new_member = account::("add", m, SEED); let new_member_lookup = T::Lookup::unlookup(new_member.clone()); }: { - assert_ok!(>::add_member( + assert_ok!(Membership::::add_member( T::AddOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, new_member_lookup, )); } verify { - assert!(>::get().contains(&new_member)); + assert!(Members::::get().contains(&new_member)); #[cfg(test)] crate::tests::clean(); } @@ -436,14 +455,14 @@ mod benchmark { let to_remove = members.first().cloned().unwrap(); let to_remove_lookup = T::Lookup::unlookup(to_remove.clone()); }: { - assert_ok!(>::remove_member( + assert_ok!(Membership::::remove_member( T::RemoveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, to_remove_lookup, )); } verify { - assert!(!>::get().contains(&to_remove)); + assert!(!Members::::get().contains(&to_remove)); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -458,16 +477,16 @@ mod benchmark { let remove = members.first().cloned().unwrap(); let remove_lookup = T::Lookup::unlookup(remove.clone()); }: { - assert_ok!(>::swap_member( + assert_ok!(Membership::::swap_member( T::SwapOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, remove_lookup, add_lookup, )); } verify { - assert!(!>::get().contains(&remove)); - assert!(>::get().contains(&add)); + assert!(!Members::::get().contains(&remove)); + assert!(Members::::get().contains(&add)); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -479,15 +498,15 @@ mod benchmark { set_members::(members.clone(), Some(members.len() - 1)); let mut new_members = (m..2*m).map(|i| account("member", i, SEED)).collect::>(); }: { - assert_ok!(>::reset_members( + assert_ok!(Membership::::reset_members( 
T::ResetOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, new_members.clone(), )); } verify { new_members.sort(); - assert_eq!(>::get(), new_members); + assert_eq!(Members::::get(), new_members); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -503,12 +522,12 @@ mod benchmark { let add_lookup = T::Lookup::unlookup(add.clone()); whitelist!(prime); }: { - assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); + assert_ok!(Membership::::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); } verify { - assert!(!>::get().contains(&prime)); - assert!(>::get().contains(&add)); + assert!(!Members::::get().contains(&prime)); + assert!(Members::::get().contains(&add)); // prime is rejigged - assert_eq!(>::get().unwrap(), add); + assert_eq!(Prime::::get().unwrap(), add); #[cfg(test)] crate::tests::clean(); } @@ -519,12 +538,12 @@ mod benchmark { let prime_lookup = T::Lookup::unlookup(prime.clone()); set_members::(members, None); }: { - assert_ok!(>::set_prime( + assert_ok!(Membership::::set_prime( T::PrimeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, prime_lookup, )); } verify { - assert!(>::get().is_some()); + assert!(Prime::::get().is_some()); assert!(::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -534,11 +553,11 @@ mod benchmark { let prime = members.last().cloned().unwrap(); set_members::(members, None); }: { - assert_ok!(>::clear_prime( + assert_ok!(Membership::::clear_prime( T::PrimeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, )); } verify { - assert!(>::get().is_none()); + assert!(Prime::::get().is_none()); assert!(::get_prime().is_none()); #[cfg(test)] crate::tests::clean(); } @@ -655,7 +674,7 @@ mod tests { #[test] fn query_membership_works() { new_test_ext().execute_with(|| { - assert_eq!(Membership::members(), vec![10, 20, 30]); + assert_eq!(crate::Members::::get(), vec![10, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![10, 20, 30]); }); } @@ -669,12 +688,12 @@ mod tests { Error::::NotMember ); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::clear_prime(RuntimeOrigin::signed(5))); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -687,8 +706,11 @@ mod tests { Error::::AlreadyMember ); assert_ok!(Membership::add_member(RuntimeOrigin::signed(1), 15)); - assert_eq!(Membership::members(), vec![10, 15, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![10, 15, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -702,10 +724,13 @@ mod tests { ); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_ok!(Membership::remove_member(RuntimeOrigin::signed(2), 20)); - assert_eq!(Membership::members(), vec![10, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), 
Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -724,16 +749,19 @@ mod tests { assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 20, 20)); - assert_eq!(Membership::members(), vec![10, 20, 30]); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 20, 30]); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 25)); - assert_eq!(Membership::members(), vec![20, 25, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![20, 25, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -741,8 +769,11 @@ mod tests { fn swap_member_works_that_does_not_change_order() { new_test_ext().execute_with(|| { assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![5, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -770,10 +801,13 @@ mod tests { Error::::AlreadyMember ); assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 40)); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), Some(40)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![20, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), Some(40)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -781,8 +815,11 @@ mod tests { fn change_key_works_that_does_not_change_order() { new_test_ext().execute_with(|| { assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![5, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -803,16 +840,22 @@ mod tests { ); assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![20, 40, 30])); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + 
assert_eq!(crate::Members::::get(), vec![20, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![10, 40, 30])); - assert_eq!(Membership::members(), vec![10, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index be1d35a61eaa2c5d7b037e9fb8a144daf691c4fc..27ba77e5a62a9c6d2c31639aac5824ab0e8383cc 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -15,24 +15,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ - "derive", -] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -env_logger = "0.11" -itertools = "0.11" +array-bytes = { workspace = true, default-features = true } +env_logger = { workspace = true } +itertools = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index 707b7980104d6fb364a6c7dbcc8e42300a743666..374ace8e4cff30a654664be225e36766c7003a28 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -297,6 +297,19 @@ where } } +/// Stateless ancestry proof verification. +pub fn verify_ancestry_proof( + root: H::Output, + ancestry_proof: primitives::AncestryProof, +) -> Result +where + H: traits::Hash, + L: primitives::FullLeaf, +{ + mmr::verify_ancestry_proof::(root, ancestry_proof) + .map_err(|_| Error::Verify.log_debug(("The ancestry proof is incorrect.", root))) +} + impl, I: 'static> Pallet { /// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR. 
/// @@ -318,17 +331,14 @@ impl, I: 'static> Pallet { } /// Provide the parent number for the block that added `leaf_index` to the MMR. - fn leaf_index_to_parent_block_num( - leaf_index: LeafIndex, - leaves_count: LeafIndex, - ) -> BlockNumberFor { + fn leaf_index_to_parent_block_num(leaf_index: LeafIndex) -> BlockNumberFor { // leaves are zero-indexed and were added one per block since pallet activation, // while block numbers are one-indexed, so block number that added `leaf_idx` is: // `block_num = block_num_when_pallet_activated + leaf_idx + 1` // `block_num = (current_block_num - leaves_count) + leaf_idx + 1` // `parent_block_num = current_block_num - leaves_count + leaf_idx`. >::block_number() - .saturating_sub(leaves_count.saturated_into()) + .saturating_sub(Self::mmr_leaves().saturated_into()) .saturating_add(leaf_index.saturated_into()) } @@ -345,6 +355,15 @@ impl, I: 'static> Pallet { utils::block_num_to_leaf_index::>(block_num, first_mmr_block) } + /// Convert a block number into a leaf index. + pub fn block_num_to_leaf_count(block_num: BlockNumberFor) -> Result + where + T: frame_system::Config, + { + let leaf_index = Self::block_num_to_leaf_index(block_num)?; + Ok(leaf_index.saturating_add(1)) + } + /// Generate an MMR proof for the given `block_numbers`. /// If `best_known_block_number = Some(n)`, this generates a historical proof for /// the chain with head at height `n`. @@ -362,8 +381,7 @@ impl, I: 'static> Pallet { let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); - let leaves_count = - Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + let leaf_count = Self::block_num_to_leaf_count(best_known_block_number)?; // we need to translate the block_numbers into leaf indices. let leaf_indices = block_numbers @@ -373,7 +391,7 @@ impl, I: 'static> Pallet { }) .collect::, _>>()?; - let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); + let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); mmr.generate_proof(leaf_indices) } @@ -389,7 +407,7 @@ impl, I: 'static> Pallet { ) -> Result<(), primitives::Error> { if proof.leaf_count > NumberOfLeaves::::get() || proof.leaf_count == 0 || - (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count + proof.items.len().saturating_add(leaves.len()) as u64 > proof.leaf_count { return Err(primitives::Error::Verify .log_debug("The proof has incorrect number of leaves or proof items.")) @@ -412,24 +430,18 @@ impl, I: 'static> Pallet { let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); - let leaf_count = Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); - let prev_leaf_count = Self::block_num_to_leaf_index(prev_block_number)?.saturating_add(1); + let leaf_count = Self::block_num_to_leaf_count(best_known_block_number)?; + let prev_leaf_count = Self::block_num_to_leaf_count(prev_block_number)?; let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); mmr.generate_ancestry_proof(prev_leaf_count) } pub fn verify_ancestry_proof( + root: HashOf, ancestry_proof: primitives::AncestryProof>, - ) -> Result<(), Error> { - let mmr: ModuleMmr = - mmr::Mmr::new(ancestry_proof.leaf_count); - let is_valid = mmr.verify_ancestry_proof(ancestry_proof)?; - if is_valid { - Ok(()) - } else { - Err(Error::Verify.log_debug("The ancestry proof is incorrect.")) - } + ) -> Result, Error> { + verify_ancestry_proof::, LeafOf>(root, ancestry_proof) } /// Return the on-chain MMR root hash. 
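With the refactor above, ancestry-proof verification no longer needs an `Mmr` instance: the new free function takes the expected current root and, on success, returns the previous root committed to by the proof, and `Pallet::verify_ancestry_proof` now just delegates to it. A hypothetical call site, assuming a runtime named `Runtime` with the default MMR pallet instance and `u64` block numbers (the flow mirrors the updated test further down):

```rust
use pallet_mmr::Pallet as Mmr;
use sp_mmr_primitives::Error;

fn check_ancestry(prev_block_number: u64) -> Result<(), Error> {
	// Proof that the MMR as of `prev_block_number` is an ancestor of the current MMR.
	let proof = Mmr::<Runtime>::generate_ancestry_proof(prev_block_number, None)?;

	// Verification is stateless: pass the expected current root and, on success,
	// get the ancestor (previous) root back.
	let current_root = Mmr::<Runtime>::mmr_root();
	let prev_root = Mmr::<Runtime>::verify_ancestry_proof(current_root, proof)?;

	// `prev_root` can then be compared against an independently recorded value.
	let _ = prev_root;
	Ok(())
}
```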
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index 5efc172d1e93f1d7a56e3b36da20f93c6e1df1f9..8a99f4d87deb02df9c86f449317a2d8a7baa484e 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -60,6 +60,42 @@ where .map_err(|e| Error::Verify.log_debug(e)) } +pub fn verify_ancestry_proof( + root: H::Output, + ancestry_proof: primitives::AncestryProof, +) -> Result +where + H: sp_runtime::traits::Hash, + L: primitives::FullLeaf, +{ + let mmr_size = NodesUtils::new(ancestry_proof.leaf_count).size(); + + let prev_peaks_proof = mmr_lib::NodeMerkleProof::, Hasher>::new( + mmr_size, + ancestry_proof + .items + .into_iter() + .map(|(index, hash)| (index, Node::Hash(hash))) + .collect(), + ); + + let raw_ancestry_proof = mmr_lib::AncestryProof::, Hasher> { + prev_peaks: ancestry_proof.prev_peaks.into_iter().map(|hash| Node::Hash(hash)).collect(), + prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), + proof: prev_peaks_proof, + }; + + let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::, Hasher>( + raw_ancestry_proof.prev_peaks.clone(), + ) + .map_err(|e| Error::Verify.log_debug(e))?; + raw_ancestry_proof + .verify_ancestor(Node::Hash(root), prev_root.clone()) + .map_err(|e| Error::Verify.log_debug(e))?; + + Ok(prev_root.hash()) +} + /// A wrapper around an MMR library to expose limited functionality. /// /// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) @@ -119,44 +155,6 @@ where .map_err(|e| Error::Verify.log_debug(e)) } - pub fn verify_ancestry_proof( - &self, - ancestry_proof: primitives::AncestryProof>, - ) -> Result { - let prev_peaks_proof = - mmr_lib::NodeMerkleProof::, Hasher, L>>::new( - self.mmr.mmr_size(), - ancestry_proof - .items - .into_iter() - .map(|(index, hash)| (index, Node::Hash(hash))) - .collect(), - ); - - let raw_ancestry_proof = mmr_lib::AncestryProof::< - NodeOf, - Hasher, L>, - > { - prev_peaks: ancestry_proof - .prev_peaks - .into_iter() - .map(|hash| Node::Hash(hash)) - .collect(), - prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), - proof: prev_peaks_proof, - }; - - let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::< - NodeOf, - Hasher, L>, - >(raw_ancestry_proof.prev_peaks.clone()) - .map_err(|e| Error::Verify.log_debug(e))?; - let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; - raw_ancestry_proof - .verify_ancestor(root, prev_root) - .map_err(|e| Error::Verify.log_debug(e)) - } - /// Return the internal size of the MMR (number of nodes). #[cfg(test)] pub fn size(&self) -> NodeIndex { diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs index 93fefe910e45df386d0464b455e6f55c44aaa8e0..5b73f53506e92f3c0d8b8ffbd876cc6fafc987b9 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs @@ -21,7 +21,7 @@ pub mod storage; use sp_mmr_primitives::{mmr_lib, DataOrHash, FullLeaf}; use sp_runtime::traits; -pub use self::mmr::{verify_leaves_proof, Mmr}; +pub use self::mmr::{verify_ancestry_proof, verify_leaves_proof, Mmr}; /// Node type for runtime `T`. 
pub type NodeOf = Node<>::Hashing, L>; diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index 6848b8f1b9906b85bfc7c3b9ca9d5a52b4ddaed8..e27440be35c45cdf4823f3cdb542f07197e302da 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -67,7 +67,6 @@ where L: primitives::FullLeaf + codec::Decode, { fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { - let leaves = NumberOfLeaves::::get(); // Find out which leaf added node `pos` in the MMR. let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); @@ -86,7 +85,7 @@ where // Fall through to searching node using fork-specific key. let ancestor_parent_block_num = - Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); + Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx); let ancestor_parent_hash = T::BlockHashProvider::block_hash(ancestor_parent_block_num); let temp_key = Pallet::::node_temp_offchain_key(pos, ancestor_parent_hash); debug!( diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index f8cfcb4e2c286f949207826990ba715485247ca2..b8c9d54db8209bffa0ee068b03a6ea5ac779e3f4 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -792,16 +792,28 @@ fn does_not_panic_when_generating_historical_proofs() { fn generating_and_verifying_ancestry_proofs_works_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(500)); + + let mut prev_roots = vec![]; + ext.execute_with(|| { + for _ in 1..=500 { + add_blocks(1); + prev_roots.push(Pallet::::mmr_root()) + } + }); ext.persist_offchain_overlay(); register_offchain_ext(&mut ext); ext.execute_with(|| { + let root = Pallet::::mmr_root(); // Check that generating and verifying ancestry proofs works correctly // for each previous block - for prev_block_number in 1..501 { - let proof = Pallet::::generate_ancestry_proof(prev_block_number, None).unwrap(); - Pallet::::verify_ancestry_proof(proof).unwrap(); + for prev_block_number in 1usize..=500 { + let proof = + Pallet::::generate_ancestry_proof(prev_block_number as u64, None).unwrap(); + assert_eq!( + Pallet::::verify_ancestry_proof(root, proof), + Ok(prev_roots[prev_block_number - 1]) + ); } // Check that we can't generate ancestry proofs for a future block. 
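The Cargo.toml hunks in this section, and the ones that follow for message-queue, metadata-hash-extension, migrations, mixnet, multisig, the NFT crates, nis, node-authorization and nomination-pools, all apply the same mechanical change: per-crate `path`/`version` entries become `workspace = true` references resolved against the root `[workspace.dependencies]` table, with each crate keeping only its local feature tweaks and dev-dependencies opting back into default features. A condensed sketch of the two sides of that contract (the root entries shown here are illustrative, not copied from the repository):

```toml
# Root Cargo.toml — one shared definition per dependency.
[workspace.dependencies]
codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
frame-support = { path = "substrate/frame/support", default-features = false }
pallet-balances = { path = "substrate/frame/balances", default-features = false }

# A member crate's Cargo.toml — inherit the definition, add only local tweaks.
[dependencies]
codec = { features = ["derive"], workspace = true }
frame-support = { workspace = true }

[dev-dependencies]
# Tests build for std, so default features are switched back on.
pallet-balances = { workspace = true, default-features = true }
```

Feature lists in a member crate are additive on top of whatever the workspace entry enables, which is why hunks like `codec = { features = ["derive"], workspace = true }` keep only the feature list and drop everything else.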
diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index e44cbeb1550ccfdb0daf8e871e924df261a59227..5ced1a4e08ff9a4d6fe328f7db76eef23f7c69f1 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -12,28 +12,28 @@ description = "FRAME pallet to queue and process messages" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -environmental = { version = "1.1.4", default-features = false } +environmental = { workspace = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-arithmetic = { workspace = true } +sp-weights = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-tracing = { path = "../../primitives/tracing" } -rand = "0.8.5" -rand_distr = "0.4.3" +sp-crypto-hashing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +rand_distr = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index 13d4bd0c2ea909700dda8d1ec621892c4a728724..10d90bba0911bc1830a29ede8c9dbe78e9e45716 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -9,23 +9,23 @@ repository.workspace = true description = "FRAME signed extension for verifying the metadata hash" [dependencies] -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -log = { workspace = true, default-features = false } -docify = "0.2.8" +array-bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-runtime = { 
features = ["serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +docify = { workspace = true } [dev-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", features = ["metadata-hash"] } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-api = { path = "../../primitives/api" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } -merkleized-metadata = "0.1.0" -frame-metadata = { version = "16.0.0", features = ["current"] } -sp-tracing = { path = "../../primitives/tracing" } +substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-api = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +merkleized-metadata = { workspace = true } +frame-metadata = { features = ["current"], workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 69e910a4e4f6e56b2b848f9b0f402da40d73f77d..5a946d3aa05816ef258ef3ce47c65bbf2fdc08d2 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -11,28 +11,28 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -impl-trait-for-tuples = "0.2.2" -log = "0.4.21" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } -frame-support = { default-features = false, path = "../support" } -frame-system = { default-features = false, path = "../system" } -sp-core = { path = "../../primitives/core", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -frame-executive = { path = "../executive" } -sp-api = { path = "../../primitives/api", features = ["std"] } -sp-block-builder = { path = "../../primitives/block-builder", features = ["std"] } -sp-io = { path = "../../primitives/io", features = ["std"] } -sp-tracing = { path = "../../primitives/tracing", features = ["std"] } -sp-version = { path = "../../primitives/version", features = ["std"] } +frame-executive = { workspace = true, default-features = true } +sp-api = { features = ["std"], workspace = true, default-features = true } +sp-block-builder = { features = ["std"], workspace = true, default-features = true } +sp-io = { features = ["std"], workspace = true, default-features = true } +sp-tracing = { features = ["std"], workspace = true, default-features = true } +sp-version = { features = ["std"], workspace = true, 
default-features = true } -pretty_assertions = "1.3.0" +pretty_assertions = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 44a567d668fb3c3452b4492831b0be66595a7c05..f3bea3d2914752930a4164b8c19ab6ce0c444018 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } -frame-support = { default-features = false, path = "../support" } -frame-system = { default-features = false, path = "../system" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } -sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } -sp-io = { default-features = false, path = "../../primitives/io" } -sp-mixnet = { default-features = false, path = "../../primitives/mixnet" } -sp-runtime = { default-features = false, path = "../../primitives/runtime" } -sp-std = { default-features = false, path = "../../primitives/std" } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-mixnet = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 649a7100325f931e940760688710f88a1fbb6154..329b2e8824e2449ae99ad55f9a6796fd35f1c044 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # third party log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nft-fractionalization/Cargo.toml 
b/substrate/frame/nft-fractionalization/Cargo.toml index e2a7e34c637b430dbda56f1690938c1cde2b100e..f0613581257e9418e38990e6f74a45065d076493 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-assets = { path = "../assets", default-features = false } -pallet-nfts = { path = "../nfts", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +pallet-nfts = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 82a608816260041d6887af80334b423de1c4f744..50b41b5fc64e101713f326444c2efaa326093fde 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -57,20 +57,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 5c5c011c94ea27467e5b8bbd18fda890a181acf2..a1eef3226ecf7ac1b7f5ae91fa471854691dd7fc 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -enumflags2 = { version = "0.7.7" } +codec = { workspace = true } +enumflags2 = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = 
"../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-keystore = { path = "../../primitives/keystore" } +pallet-balances = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 6bee98fb51e0c908dfb70d28891e91cb55a7c6ea..731f5f5ae7767f918d5cd6f775f4a48c2cc7d6ac 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -pallet-nfts = { path = "..", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +pallet-nfts = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index 8792af675fc16de7c18f07312aa74e4bab46d3db..80860bc5a53c432685abd7144e92b85e0c697edf 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -30,11 +30,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; -use sp_io::crypto::{sr25519_generate, sr25519_sign}; -use sp_runtime::{ - traits::{Bounded, IdentifyAccount, One}, - AccountId32, MultiSignature, MultiSigner, -}; +use sp_runtime::traits::{Bounded, One}; use sp_std::prelude::*; use crate::Pallet as Nfts; @@ -229,12 +225,6 @@ fn make_filled_vec(value: u16, length: usize) -> Vec { } benchmarks_instance_pallet! { - where_clause { - where - T::OffchainSignature: From, - T::AccountId: From, - } - create { let collection = T::Helper::collection(0); let origin = T::CreateOrigin::try_successful_origin(&collection) @@ -800,8 +790,7 @@ benchmarks_instance_pallet! { mint_pre_signed { let n in 0 .. T::MaxAttributesPerCall::get() as u32; - let caller_public = sr25519_generate(0.into(), None); - let caller = MultiSigner::Sr25519(caller_public).into_account().into(); + let (caller_public, caller) = T::Helper::signer(); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let caller_lookup = T::Lookup::unlookup(caller.clone()); @@ -830,7 +819,7 @@ benchmarks_instance_pallet! 
{ mint_price: Some(DepositBalanceOf::::min_value()), }; let message = Encode::encode(&mint_data); - let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &caller_public, &message).unwrap()); + let signature = T::Helper::sign(&caller_public, &message); let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); @@ -848,8 +837,7 @@ benchmarks_instance_pallet! { let item_owner: T::AccountId = account("item_owner", 0, SEED); let item_owner_lookup = T::Lookup::unlookup(item_owner.clone()); - let signer_public = sr25519_generate(0.into(), None); - let signer: T::AccountId = MultiSigner::Sr25519(signer_public).into_account().into(); + let (signer_public, signer) = T::Helper::signer(); T::Currency::make_free_balance_be(&item_owner, DepositBalanceOf::::max_value()); @@ -876,7 +864,7 @@ benchmarks_instance_pallet! { deadline: One::one(), }; let message = Encode::encode(&pre_signed_data); - let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &signer_public, &message).unwrap()); + let signature = T::Helper::sign(&signer_public, &message); frame_system::Pallet::::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 615720268fed611bbf7effa1f4b71eca0df4a7e7..0406cac6e2c983da4e7ef3db9e4fd506c1803506 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -84,18 +84,42 @@ pub mod pallet { pub struct Pallet(PhantomData<(T, I)>); #[cfg(feature = "runtime-benchmarks")] - pub trait BenchmarkHelper { + pub trait BenchmarkHelper { fn collection(i: u16) -> CollectionId; fn item(i: u16) -> ItemId; + fn signer() -> (Public, AccountId); + fn sign(signer: &Public, message: &[u8]) -> Signature; } #[cfg(feature = "runtime-benchmarks")] - impl, ItemId: From> BenchmarkHelper for () { + impl + BenchmarkHelper< + CollectionId, + ItemId, + sp_runtime::MultiSigner, + sp_runtime::AccountId32, + sp_runtime::MultiSignature, + > for () + where + CollectionId: From, + ItemId: From, + { fn collection(i: u16) -> CollectionId { i.into() } fn item(i: u16) -> ItemId { i.into() } + fn signer() -> (sp_runtime::MultiSigner, sp_runtime::AccountId32) { + let public = sp_io::crypto::sr25519_generate(0.into(), None); + let account = sp_runtime::MultiSigner::Sr25519(public).into_account(); + (public.into(), account) + } + fn sign(signer: &sp_runtime::MultiSigner, message: &[u8]) -> sp_runtime::MultiSignature { + sp_runtime::MultiSignature::Sr25519( + sp_io::crypto::sr25519_sign(0.into(), &signer.clone().try_into().unwrap(), message) + .unwrap(), + ) + } } #[pallet::config] @@ -206,7 +230,13 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. - type Helper: BenchmarkHelper; + type Helper: BenchmarkHelper< + Self::CollectionId, + Self::ItemId, + Self::OffchainPublic, + Self::AccountId, + Self::OffchainSignature, + >; /// Weight information for extrinsics in this pallet. 
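The `BenchmarkHelper` extension above lets the pre-signed-mint benchmarks run on runtimes whose account and signature types are not `AccountId32`/`MultiSignature`: key generation and signing are delegated to the helper instead of being hard-wired to sr25519 in the benchmark body. A sketch of a custom helper for a hypothetical runtime with `u32` collection and item ids that still signs with sr25519; it mirrors the unit impl shown above (`NftsBenchmarkHelper` is an illustrative name):

```rust
use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner};

pub struct NftsBenchmarkHelper;

#[cfg(feature = "runtime-benchmarks")]
impl pallet_nfts::BenchmarkHelper<u32, u32, MultiSigner, AccountId32, MultiSignature>
	for NftsBenchmarkHelper
{
	fn collection(i: u16) -> u32 {
		i.into()
	}
	fn item(i: u16) -> u32 {
		i.into()
	}
	fn signer() -> (MultiSigner, AccountId32) {
		// Create an sr25519 key in the (test) keystore and derive the account from it.
		let public = sp_io::crypto::sr25519_generate(0.into(), None);
		let signer = MultiSigner::Sr25519(public);
		let account = signer.clone().into_account();
		(signer, account)
	}
	fn sign(signer: &MultiSigner, message: &[u8]) -> MultiSignature {
		let MultiSigner::Sr25519(public) = signer else {
			panic!("this helper only generates sr25519 signers")
		};
		MultiSignature::Sr25519(
			sp_io::crypto::sr25519_sign(0.into(), public, message)
				.expect("key was generated by `signer()` above"),
		)
	}
}
```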
type WeightInfo: WeightInfo; diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 51cfd5f244bcdb14fcdb1f66cff245df9030185f..5b589f591ca347a670becdc9557f24ec1a4e8520 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -53,20 +53,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 1e3a0609c46bbfc40b044eae839f9bd0303e0178..418c906e2b66cfae53d475ff0007336cbe93b5ea 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-io = { path = "../../primitives/io" } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index f38755836fb9f15dd6db8eaab38f206dea43b562..d815ea6ac115cc3bc3ddbf45d74a6f7c252e5455 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -755,7 +755,13 @@ pub mod pallet { // We ignore this error as it just means the amount we're trying to deposit is // dust and the beneficiary account doesn't exist. 
.or_else( - |e| if e == TokenError::CannotCreate.into() { Ok(()) } else { Err(e) }, + |e| { + if e == TokenError::CannotCreate.into() { + Ok(()) + } else { + Err(e) + } + }, )?; summary.receipts_on_hold.saturating_reduce(on_hold); } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 17ed16d2623368826f1ad93c6b4f5032775319cf..d16e9bc6ead6c3c4e17cd56e27ff504716725436 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -15,15 +15,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index bf4e01a3184793d638b1c87f5984a517a18e2ceb..535d5252435f38d3dd85fd0d8c923b29c2e00f34 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -16,30 +16,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # FRAME -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } log = { workspace = true } # Optional: use for testing and/or fuzzing -pallet-balances = { path = "../balances", optional = true, default-features = false } -sp-tracing = { path = "../../primitives/tracing", optional = true, default-features = false } +pallet-balances = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } +pallet-balances = { 
workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 3f9463a9c429b93da7cf5945a6ebd2ac46077196..1516f0154b88f91153e1c4872a297b56eb1bcb7b 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -17,31 +17,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # FRAME -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-election-provider-support = { path = "../../election-provider-support", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-bags-list = { path = "../../bags-list", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -pallet-delegated-staking = { path = "../../delegated-staking", default-features = false } -pallet-nomination-pools = { path = "..", default-features = false } +frame-benchmarking = { workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-bags-list = { workspace = true } +pallet-staking = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-nomination-pools = { workspace = true } # Substrate Primitives -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } -sp-staking = { path = "../../../primitives/staking", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../balances", default-features = false } -pallet-timestamp = { path = "../../timestamp" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 7cbb61e00a31a3a657831119546aa40dc385bae7..15d9e2c56031fe1e47b7b0cef3139ca177ddeb67 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,13 @@ use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; -use frame_support::{derive_impl, pallet_prelude::*, parameter_types, traits::ConstU64, 
PalletId}; +use frame_support::{ + derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, VariantCountOf}, + PalletId, +}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -45,20 +51,16 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = 10; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -74,36 +76,19 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = ConstU32<3>; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/nomination-pools/fuzzer/Cargo.toml b/substrate/frame/nomination-pools/fuzzer/Cargo.toml index c0d63a2685937a10e6a53288403099bc78ed1fa9..7c45dcef256fb9b91ec04d309794cf5cd61cda91 100644 --- a/substrate/frame/nomination-pools/fuzzer/Cargo.toml +++ b/substrate/frame/nomination-pools/fuzzer/Cargo.toml @@ -17,18 +17,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -honggfuzz = "0.5.54" +honggfuzz = { workspace = true } -pallet-nomination-pools = { path = "..", features = ["fuzzing"] } +pallet-nomination-pools = { features = ["fuzzing"], workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-tracing = { path = "../../../primitives/tracing" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } -rand = { version = "0.8.5", features = ["small_rng"] } +rand = { features = ["small_rng"], workspace = true, default-features = true } log = { workspace = true, default-features = true } [[bin]] diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index a0ddac9e045675aefe7b8139e0048f3399663da8..2b217e021455fddac8fc1c7efe5719901c7f1d9f 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -pallet-nomination-pools = { path = "..", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +pallet-nomination-pools = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 2aaea04463661d6421588e791e7fc281cd872813..2b5fe8b604121db66c8db94ae27e70ba29ba7afd 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -494,7 +494,6 @@ impl ClaimPermission { frame_support::PartialEqNoBound, )] #[cfg_attr(feature = "std", derive(DefaultNoBound))] -#[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] pub struct PoolMember { /// The identifier of the pool to which `who` belongs. 
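The mock-runtime hunks in this diff repeatedly collapse hand-written `pallet_balances::Config` and `pallet_staking::Config` impls into `#[derive_impl(...)]` over the pallets' `config_preludes::TestDefaultConfig`. The following is a minimal, self-contained sketch of that pattern; the `Test` runtime, the pallet selection and the `new_test_ext` helper are illustrative placeholders, not code taken from any crate touched in this diff.

```rust
// Minimal sketch of the `derive_impl` test-default pattern applied by the
// mock changes in this diff. Only the associated types a mock actually
// cares about are written out; everything else falls back to the
// pallet-provided `TestDefaultConfig` prelude.
use frame_support::derive_impl;
use sp_runtime::BuildStorage;

type Block = frame_system::mocking::MockBlock<Test>;

frame_support::construct_runtime!(
    pub enum Test {
        System: frame_system,
        Balances: pallet_balances,
    }
);

#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    // `Block` has no default, and `AccountData` must point at the balances
    // pallet's account data, so both stay explicit.
    type Block = Block;
    type AccountData = pallet_balances::AccountData<u64>;
}

#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    // MaxLocks, DustRemoval, WeightInfo, RuntimeEvent, etc. all come from
    // the prelude; only the storage backend is pinned here.
    type AccountStore = System;
}

// Hypothetical helper: build empty test externalities for this runtime.
pub fn new_test_ext() -> sp_io::TestExternalities {
    frame_system::GenesisConfig::<Test>::default()
        .build_storage()
        .unwrap()
        .into()
}
```

Overriding a default is simply a matter of re-declaring the associated type inside the annotated impl block, which is how the mocks in the following hunks keep custom `ExistentialDeposit`, `FreezeIdentifier` and `MaxFreezes` values while dropping the boilerplate.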
diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 93fe6aa56054d9ec65b91884673d30d1651ca35d..6c0082073f6827a5d264281bc29e08b708233e35 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -18,7 +18,8 @@ use super::*; use crate::{self as pools}; use frame_support::{ - assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::fungible::Mutate, + assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{fungible::Mutate, VariantCountOf}, PalletId, }; use frame_system::{EnsureSignedBy, RawOrigin}; @@ -251,20 +252,14 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pub struct BalanceToU256; diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index ea8eb20696931f8c45edc6f67e4d5af49412eccd..ea50dd6d732d30007afee75108cb3d62616712f8 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -16,26 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-timestamp = { path = "../../timestamp" } -pallet-balances = { path = "../../balances" } -pallet-staking = { path = "../../staking" } -pallet-delegated-staking = { path = "../../delegated-staking" } -pallet-bags-list = { path = "../../bags-list" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-nomination-pools = { path = ".." 
} +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-delegated-staking = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 820f2b7718ce4ee0d977a096aa132f87536bd8ee..ed47932a323bf14335d5bb74fec03cfa749625e3 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use frame_system::EnsureRoot; @@ -63,20 +63,15 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -95,36 +90,20 @@ parameter_types! { pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml index 5f9bc9af3a214eb3d82da11786c93bf462565a26..8bc5676cfe91640dcc8cf1fb6bd895db41a0f5a2 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-timestamp = { path = "../../timestamp" } -pallet-balances = { path = "../../balances" } -pallet-staking = { path = "../../staking" } -pallet-bags-list = { path = "../../bags-list" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-nomination-pools = { path = ".." } +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index eb9d463424c8e74089563d6dc2004ddb4a9ef2e5..d913c5fe6948cb5a0fae1d5ccf8e533e5ddb8cc7 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use sp_runtime::{ @@ -56,20 +56,14 @@ parameter_types! 
{ pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -88,36 +82,20 @@ parameter_types! { pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index a59ef9334f0bc3cba6a92943a1cbbdbe4d3c94d1..51eb3e4f4381797202b8648e59a0378c56ae72e6 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index bbd918a2883f30afb82310424375eb3ed5b5a8e8..037812b3ed114a0f5d7f8111ba83fe9da4ca9dd1 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -16,29 +16,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-election-provider-support = { path = "../../election-provider-support", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-babe = { path = "../../babe", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -pallet-grandpa = { path = "../../grandpa", default-features = false } -pallet-im-online = { path = "../../im-online", default-features = false } -pallet-offences = { path = "..", default-features = false } -pallet-session = { path = "../../session", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-staking = { path = "../../../primitives/staking", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } 
+pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-im-online = { workspace = true } +pallet-offences = { workspace = true } +pallet-session = { workspace = true } +pallet-staking = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-timestamp = { path = "../../timestamp" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 6cbdde5785282875c01cfa44c897cfa6d134fd55..e243ad0e718eb85fadca375dbf9e883635653972 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -41,20 +41,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -134,35 +124,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index f550e694349468efcca9bacf51242d378b64ba64..f5a3a2e5bae196736f9a07943fdc78a42436bba8 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml 
@@ -15,19 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } - -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } - -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false, optional = true } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +scale-info = { features = ["derive"], workspace = true } + +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-metadata-ir = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index 6ff07ba1ddd2fa3571e65a8e29c53e5ef324c0de..bfdacfd8dd03f0f31e901c63fcb9858923bbdd0a 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -17,9 +17,9 @@ name = "pallet-paged-list-fuzzer" path = "src/paged_list.rs" [dependencies] -arbitrary = "1.3.2" -honggfuzz = "0.5.49" +arbitrary = { workspace = true } +honggfuzz = { workspace = true } -frame-support = { path = "../../support", default-features = false, features = ["std"] } -sp-io = { path = "../../../primitives/io", default-features = false, features = ["std"] } -pallet-paged-list = { path = "..", default-features = false, features = ["std"] } +frame-support = { features = ["std"], workspace = true } +sp-io = { features = ["std"], workspace = true } +pallet-paged-list = { features = ["std"], workspace = true } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index c4d6d189d2d2c2a33a428cdcf3290c629464662b..b993347d24637a5ffa84c238eeb6fb75bbcb9d64 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -8,24 +8,24 @@ authors = ["Acala Developers", "Parity Technologies "] edition.workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -paste = { version = "1.0.14", default-features = false } +codec = { features = ["max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +paste = { workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -docify = "0.2.8" +docify = { workspace = true } -frame-support = { path = "../support", default-features = false, features = ["experimental"] } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } 
-sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", features = ["std"] } -sp-io = { path = "../../primitives/io", features = ["std"] } -pallet-example-basic = { path = "../examples/basic", features = ["std"] } -pallet-balances = { path = "../balances", features = ["std"] } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-io = { features = ["std"], workspace = true, default-features = true } +pallet-example-basic = { features = ["std"], workspace = true, default-features = true } +pallet-balances = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/parameters/src/tests/mock.rs b/substrate/frame/parameters/src/tests/mock.rs index 6cfd7c8f30b8119ff1e1f0a63165c98afba1577b..53a3b3e394c4b7c5d8410ba6e1af758d78cbf8ae 100644 --- a/substrate/frame/parameters/src/tests/mock.rs +++ b/substrate/frame/parameters/src/tests/mock.rs @@ -37,7 +37,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/parameters/src/tests/test_renamed.rs b/substrate/frame/parameters/src/tests/test_renamed.rs index cfc870fbe1096aeeab930d548e6455427962d62a..7c371c5e55f874c01fffde081bb65b551c75d5b3 100644 --- a/substrate/frame/parameters/src/tests/test_renamed.rs +++ b/substrate/frame/parameters/src/tests/test_renamed.rs @@ -39,7 +39,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index d420accbd6d914816d00a57879d7187f0d0d4ead..ee572b189b231054c0bea0e429c7a4659e9aa301 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -12,20 +12,20 @@ description = "FRAME pallet for storing preimages of hashes" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { 
workspace = true } +frame-system = { workspace = true } +sp-core = { optional = true, workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index d0c3404f40a911cf32d620858b97652b242f2be8..f2b76a7999d6c61827624b3b9ca6d9e55a0341d9 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -116,7 +116,7 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap().unwrap(); let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 4e474685166631ba41eed644d3754a332a290663..dd323a12b8f8479ba5d419243c9585f79ac0385a 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -122,7 +122,9 @@ pub mod pallet { type ManagerOrigin: EnsureOrigin; /// A means of providing some cost while data is stored on-chain. - type Consideration: Consideration; + /// + /// Should never return a `None`, implying no cost for a non-empty preimage. + type Consideration: Consideration; } #[pallet::pallet] @@ -158,6 +160,8 @@ pub mod pallet { TooMany, /// Too few hashes were requested to be upgraded (i.e. zero). TooFew, + /// No ticket with a cost was returned by [`Config::Consideration`] to store the preimage. + NoCost, } /// A reason for this pallet placing a hold on funds. @@ -268,10 +272,10 @@ impl Pallet { // unreserve deposit T::Currency::unreserve(&who, amount); // take consideration - let Ok(ticket) = + let Ok(Some(ticket)) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof("Unexpected inability to take deposit after unreserved") else { + defensive!("None ticket or inability to take deposit after unreserved"); return true }; RequestStatus::Unrequested { ticket: (who, ticket), len } @@ -282,12 +286,10 @@ impl Pallet { T::Currency::unreserve(&who, deposit); // take consideration if let Some(len) = maybe_len { - let Ok(ticket) = + let Ok(Some(ticket)) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof( - "Unexpected inability to take deposit after unreserved", - ) else { + defensive!("None ticket or inability to take deposit after unreserved"); return true }; Some((who, ticket)) @@ -347,7 +349,8 @@ impl Pallet { RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: Some(len) }, (None, Some(depositor)) => { let ticket = - T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))?; + T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))? 
+ .ok_or(Error::::NoCost)?; RequestStatus::Unrequested { ticket: (depositor.clone(), ticket), len } }, }; diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 903c34596aebacfd7116f1b172d54f9ed26f6426..9c72d09cae1463395aaebe1e06f95d5e22cced9f 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -22,7 +22,7 @@ use super::*; use crate as pallet_preimage; use frame_support::{ derive_impl, ord_parameter_types, parameter_types, - traits::{fungible::HoldConsideration, ConstU32, ConstU64}, + traits::{fungible::HoldConsideration, ConstU64}, }; use frame_system::EnsureSignedBy; use sp_core::H256; @@ -48,20 +48,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); } ord_parameter_types! { diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index fcebbb5f3e8a077c8505f333b073d384fb1646b2..6b4251e1bcc57b8017a6bb76c99fc3aea94d6345 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 05ce76cad2bbe133a77e9c318ce8f32a57f3a659..be48d5cad4048b90e0b08971f6b005b125adaf59 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -16,18 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } 
+codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -impl-trait-for-tuples = "0.2.2" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +impl-trait-for-tuples = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/ranked-collective/src/lib.rs b/substrate/frame/ranked-collective/src/lib.rs index ceaf03de211008ee49bf4a4a1fc961bf8825971a..53d5f0c6662d760534e87bf6962aca491fe4e617 100644 --- a/substrate/frame/ranked-collective/src/lib.rs +++ b/substrate/frame/ranked-collective/src/lib.rs @@ -379,6 +379,7 @@ pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, storage::KeyLenOf}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::MaybeConvert; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -431,6 +432,14 @@ pub mod pallet { /// in the poll. type VoteWeight: Convert; + /// The maximum number of members for a given rank in the collective. + /// + /// The member at rank `x` contributes to the count at rank `x` and all ranks below it. + /// Therefore, the limit `m` at rank `x` sets the maximum total member count for rank `x` + /// and all ranks above. + /// The `None` indicates no member count limit for the given rank. + type MaxMemberCount: MaybeConvert; + /// Setup a member for benchmarking. #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup: BenchmarkSetup; @@ -511,6 +520,8 @@ pub mod pallet { NoPermission, /// The new member to exchange is the same as the old member SameMember, + /// The max member count for the rank has been reached. 
+ TooManyMembers, } #[pallet::call] @@ -758,6 +769,9 @@ pub mod pallet { ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); let index = MemberCount::::get(0); let count = index.checked_add(1).ok_or(Overflow)?; + if let Some(max) = T::MaxMemberCount::maybe_convert(0) { + ensure!(count <= max, Error::::TooManyMembers); + } Members::::insert(&who, MemberRecord { rank: 0 }); IdToIndex::::insert(0, &who, index); @@ -784,6 +798,11 @@ pub mod pallet { ensure!(max_rank >= rank, Error::::NoPermission); } let index = MemberCount::::get(rank); + let count = index.checked_add(1).ok_or(Overflow)?; + if let Some(max) = T::MaxMemberCount::maybe_convert(rank) { + ensure!(count <= max, Error::::TooManyMembers); + } + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); IdToIndex::::insert(rank, &who, index); IndexToId::::insert(rank, index, &who); diff --git a/substrate/frame/ranked-collective/src/tests.rs b/substrate/frame/ranked-collective/src/tests.rs index ad8b7d2a8018bb0566f9b851cf64433791a1c1ce..a7827bcc1aa379f6a28d0755feb555ad487f4426 100644 --- a/substrate/frame/ranked-collective/src/tests.rs +++ b/substrate/frame/ranked-collective/src/tests.rs @@ -27,7 +27,7 @@ use frame_support::{ }; use sp_core::Get; use sp_runtime::{ - traits::{ReduceBy, ReplaceWithDefault}, + traits::{MaybeConvert, ReduceBy, ReplaceWithDefault}, BuildStorage, }; @@ -148,6 +148,17 @@ impl> Convert for MinRankOfClass { } } +pub struct MaxMemberCount; +impl MaybeConvert for MaxMemberCount { + fn maybe_convert(a: Rank) -> Option { + if a == 11 { + Some(2) + } else { + None + } + } +} + parameter_types! { pub static MinRankOfClassDelta: Rank = 0; } @@ -179,6 +190,7 @@ impl Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = (); type VoteWeight = Geometric; + type MaxMemberCount = MaxMemberCount; #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (); } @@ -645,3 +657,32 @@ fn exchange_member_same_noops() { ); }); } + +#[test] +fn max_member_count_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Club::do_add_member_to_rank(1, 10, false)); + assert_ok!(Club::do_add_member_to_rank(2, 10, false)); + assert_ok!(Club::do_add_member_to_rank(3, 10, false)); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 0); + + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_noop!(Club::promote_member(RuntimeOrigin::root(), 3), Error::::TooManyMembers); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_noop!(Club::promote_member(RuntimeOrigin::root(), 1), Error::::TooManyMembers); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + assert_eq!(member_count(12), 2); + }); +} diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 2fd63597da9caf7ede188eb4a0e70a7d809bae05..3b3a382fe6dcf8ccde7d4681b835674149896715 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -16,18 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = 
false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index bec7e02c128bbfc2c9975d881ac5210cd31aa56f..8e30cbe997e17b80e8ef74a598b15343e6affdef 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -47,20 +47,11 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index dde522ff89b59a647e87dd8436cb32bf1bb0274f..eb30fef7894b48a7a5f0685c76d96a88a0fdd43b 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -assert_matches = { version = "1.5", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +assert_matches = { optional = true, workspace = true } +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } log = { workspace = true } [dev-dependencies] -assert_matches = { version = "1.5" } -pallet-balances = { path = "../balances" } -pallet-preimage = { path = "../preimage" } -pallet-scheduler = { path = "../scheduler" } -sp-core = { path = "../../primitives/core" } +assert_matches = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index fbe27e1a4784733f517bb6ee3259ebdc4b1075b3..0cdf450d3b6c98e942e3613210f31eb34098b008 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -891,7 +891,8 @@ impl, I: 'static> Pallet { call: BoundedCallOf, ) { let now = frame_system::Pallet::::block_number(); - let earliest_allowed = now.saturating_add(track.min_enactment_period); + // Earliest allowed block is always at minimum the next block. 
+ let earliest_allowed = now.saturating_add(track.min_enactment_period.max(One::one())); let desired = desired.evaluate(now); let ok = T::Scheduler::schedule_named( (ASSEMBLY_ID, "enactment", index).using_encoded(sp_io::hashing::blake2_256), diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index 135476d7cb1373ea1e7d3164e9df9d5b9dfc36ee..bf0fa4e1a12e17a90f2d35fd555d5805765b26be 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -83,20 +83,9 @@ impl pallet_scheduler::Config for Test { type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static AlarmInterval: u64 = 1; @@ -123,7 +112,7 @@ impl TracksInfo for TestTracksInfo { type Id = u8; type RuntimeOrigin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, TrackInfo)] { - static DATA: [(u8, TrackInfo); 2] = [ + static DATA: [(u8, TrackInfo); 3] = [ ( 0u8, TrackInfo { @@ -168,6 +157,28 @@ impl TracksInfo for TestTracksInfo { }, }, ), + ( + 2u8, + TrackInfo { + name: "none", + max_deciding: 3, + decision_deposit: 1, + prepare_period: 2, + decision_period: 2, + confirm_period: 1, + min_enactment_period: 0, + min_approval: Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(95), + ceil: Perbill::from_percent(100), + }, + min_support: Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(90), + ceil: Perbill::from_percent(100), + }, + }, + ), ]; &DATA[..] } @@ -176,6 +187,7 @@ impl TracksInfo for TestTracksInfo { match system_origin { frame_system::RawOrigin::Root => Ok(0), frame_system::RawOrigin::None => Ok(1), + frame_system::RawOrigin::Signed(1) => Ok(2), _ => Err(()), } } else { diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index 52251fcbdbeed0ec5b8830e87a2a4c66bb265e39..3f859636f7cbb34d36d9fbafdfa05c189905d2c6 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -682,3 +682,27 @@ fn detects_incorrect_len() { ); }); } + +/// Ensures that `DispatchTime::After(0)` plus `min_enactment_period = 0` works. 
+#[test] +fn zero_enactment_delay_executes_proposal_at_next_block() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(Referenda::submit( + RuntimeOrigin::signed(1), + Box::new(RawOrigin::Signed(1).into()), + Preimage::bound( + pallet_balances::Call::transfer_keep_alive { dest: 42, value: 20 }.into() + ) + .unwrap(), + DispatchTime::After(0), + )); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(1), 0)); + assert_eq!(ReferendumCount::::get(), 1); + set_tally(0, 100, 0); + + run_to(9); + + assert_eq!(Balances::free_balance(42), 20); + }); +} diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index d251aacfb5b2c204a29d3098b41cba4f0317b3ca..44cf5ebc3b492a0c414716402308d1b5f22214bd 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index e7317d737fac125e5ebbafa2be46b0426014bd6e..dde264f3949a16cb1667d5ba3c5312b2182a1870 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -pallet-session = { path = "../session", default-features = false, features = ["historical"] } -pallet-staking = { path = "../staking", default-features = false } +pallet-session = { features = ["historical"], workspace = true } +pallet-staking = { workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true 
} +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true } +sp-std = { workspace = true, default-features = true } -frame-election-provider-support = { path = "../election-provider-support" } +frame-election-provider-support = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 7e7332c3f7e3b39ca9457c6da05c7eb66197d0c4..3c758b91d52fe851dd279a3e7e3bb3b9dd69ebe3 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -84,20 +84,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { @@ -135,15 +124,11 @@ parameter_types! 
{ pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; @@ -151,19 +136,10 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index 74a3b8f479fa30df155de60d306251a7f8e7e694..38d91c8104b902eeaf742ab7fa20540ae0d1d598 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -16,14 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] try-runtime = [ diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 7ecbdb6eeda5b16d35be7a98885ba0a027fbb89a..749c4bcdc7000d7d772433e92c327284b686d410 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -15,26 +15,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -pallet-proxy = { path = "../proxy" } -frame-support = { path = "../support", features = ["experimental"] } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 0beb911267dc5265bdf14cd1f46afff27b422e55..ec1ad82495147dbfc3039fc32dd3cd1e280629ad 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -68,20 +68,10 @@ pub enum HoldReason { SafeMode, } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<2>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<10>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 25911269a95ddd3b51e639f56c10a647d321ba77..c9b72da2e649fa9f04c32d95fde2b3102b239185 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -16,18 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path 
= "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-ranked-collective = { path = "../ranked-collective", default-features = false, optional = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 124ab38c5651b1e3c57d0457952eda8cd5b92e98..69f218943aded41107a143d8eea7d1094333f86b 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -180,6 +180,7 @@ impl pallet_ranked_collective::Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = Salary; type VoteWeight = Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = Salary; } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 2105ba13314762151c02b3fc880795798414d32c..ecd03c47db6898a41ec5c09ea078b79797cb16ed 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -17,21 +17,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-consensus-sassafras = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +array-bytes = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 
e851f876112e8829a7914b18f5695436040ebbb8..0a6b646fc8c3c41ac262758304e33a48a3a2b39d 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } -docify = "0.2.8" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-weights = { workspace = true } +docify = { workspace = true } [dev-dependencies] -pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core", default-features = false } -substrate-test-utils = { path = "../../test-utils" } +pallet-preimage = { workspace = true, default-features = true } +sp-core = { workspace = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index f25bd1f1769ba458c8ea8c88ac9d0e18ce8fde2e..69ecd527eb7c9d4491400e6815ce17511f43f41a 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 9d2f5eb1099f819ad0db2ac1c6d1878a5d2df2da..7708c06e56bd8056b0b2ff89f5b383d3b4c180bf 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -52,20 +52,9 @@ impl 
frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 42ea957ac1581fa8434d94a0737597d82f4b74e5..c7b499548bdb4cce9d5fb7a08a2e45f8fe635e61 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-timestamp = { path = "../timestamp", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-trie = { path = "../../primitives/trie", default-features = false, optional = true } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } +sp-trie = { optional = true, workspace = true } +sp-state-machine = { workspace = true } [features] default = ["historical", "std"] diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index a306f9015c02913da0f2ebf96a252a86f4333c51..ba7f7acfd9919f58e5fd3e3e2e50534d118ffde4 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -16,26 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", 
default-features = false } -pallet-session = { path = "..", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +rand = { features = ["std_rng"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { workspace = true } +pallet-staking = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = "2.11.1" -frame-election-provider-support = { path = "../../election-provider-support" } -pallet-balances = { path = "../../balances" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-timestamp = { path = "../../timestamp" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 5cba79ef5b9a2f556096f7a7117330b3a7d80c37..2aec58cceded2d186d9ead2ca2c4768e7cce1b90 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -54,20 +54,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -139,35 +129,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type 
GenesisElectionProvider = Self::ElectionProvider; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index b9cecea1a7f7144fb7f846548b827455dce0a1e5..618497e3d54db378717561beb47070204af99ab6 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -37,21 +37,22 @@ use sp_runtime::{ }; use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; -use sp_std::prelude::*; +use sp_std::{fmt::Debug, prelude::*}; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, - LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + LayoutV0, MemoryDB, Recorder, StorageProof, Trie, TrieMut, TrieRecorder, }; use frame_support::{ print, traits::{KeyOwnerProofSystem, ValidatorSet, ValidatorSetWithIdentification}, - Parameter, + Parameter, LOG_TARGET, }; use crate::{self as pallet_session, Pallet as Session}; pub use pallet::*; +use sp_trie::{accessed_nodes_tracker::AccessedNodesTracker, recorder_ext::RecorderExt}; #[frame_support::pallet] pub mod pallet { @@ -118,6 +119,16 @@ impl Pallet { } }) } + + fn full_id_validators() -> Vec<(T::ValidatorId, T::FullIdentification)> { + >::validators() + .into_iter() + .filter_map(|validator| { + T::FullIdentificationOf::convert(validator.clone()) + .map(|full_id| (validator, full_id)) + }) + .collect::>() + } } impl ValidatorSet for Pallet { @@ -264,35 +275,16 @@ impl ProvingTrie { Ok(ProvingTrie { db, root }) } - fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { - use sp_trie::HashDBT; - - let mut memory_db = MemoryDB::default(); - for node in nodes { - HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); - } - - ProvingTrie { db: memory_db, root } + fn from_proof(root: T::Hash, proof: StorageProof) -> Self { + ProvingTrie { db: proof.into_memory_db(), root } } /// Prove the full verification data for a given key and key ID. pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { let mut recorder = Recorder::>::new(); - { - let trie = - TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get(s) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; - } + self.query(key_id, key_data, Some(&mut recorder)); - Some(recorder.drain().into_iter().map(|r| r.data).collect()) + Some(recorder.into_raw_storage_proof()) } /// Access the underlying trie root. @@ -300,10 +292,17 @@ impl ProvingTrie { &self.root } - // Check a proof contained within the current memory-db. Returns `None` if the - // nodes within the current `MemoryDB` are insufficient to query the item. - fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDBBuilder::new(&self.db, &self.root).build(); + /// Search for a key inside the proof. 
+ fn query( + &self, + key_id: KeyTypeId, + key_data: &[u8], + recorder: Option<&mut dyn TrieRecorder>, + ) -> Option> { + let trie = TrieDBBuilder::new(&self.db, &self.root) + .with_optional_recorder(recorder) + .build(); + let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? @@ -322,13 +321,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet Option { let session = >::current_index(); - let validators = >::validators() - .into_iter() - .filter_map(|validator| { - T::FullIdentificationOf::convert(validator.clone()) - .map(|full_id| (validator, full_id)) - }) - .collect::>(); + let validators = Self::full_id_validators(); let count = validators.len() as ValidatorCount; @@ -343,30 +336,35 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet Option> { - let (id, data) = key; - - if proof.session == >::current_index() { - >::key_owner(id, data.as_ref()).and_then(|owner| { - T::FullIdentificationOf::convert(owner.clone()).and_then(move |id| { - let count = >::validators().len() as ValidatorCount; - - if count != proof.validator_count { - return None - } + fn print_error(e: E) { + log::error!( + target: LOG_TARGET, + "Rejecting equivocation report because of key ownership proof error: {:?}", e + ); + } - Some((owner, id)) - }) - }) + let (id, data) = key; + let (root, count) = if proof.session == >::current_index() { + let validators = Self::full_id_validators(); + let count = validators.len() as ValidatorCount; + let trie = ProvingTrie::::generate_for(validators).ok()?; + (trie.root, count) } else { - let (root, count) = >::get(&proof.session)?; - - if count != proof.validator_count { - return None - } + >::get(&proof.session)? + }; - let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); - trie.query(id, data.as_ref()) + if count != proof.validator_count { + return None } + + let proof = StorageProof::new_with_duplicate_nodes_check(proof.trie_nodes) + .map_err(print_error) + .ok()?; + let mut accessed_nodes_tracker = AccessedNodesTracker::::new(proof.len()); + let trie = ProvingTrie::::from_proof(root, proof); + let res = trie.query(id, data.as_ref(), Some(&mut accessed_nodes_tracker))?; + accessed_nodes_tracker.ensure_no_unused_nodes().map_err(print_error).ok()?; + Some(res) } } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index ed7fea523bffbd15a4da83e16f167ba239ecfca8..6582d47f194310250868fb8982685c3098197f33 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -17,24 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -rand_chacha = { version = "0.3.1", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +rand_chacha = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } -sp-std = { path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } 
+sp-std = { workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -frame-support-test = { path = "../support/test" } -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-io = { path = "../../primitives/io" } +frame-support-test = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index e41f7f1c0ef308673b11ce99018156e2eff064e1..e09d8fc4fa1d4e6a86ea6931ccab10b9bca38651 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -30,13 +30,43 @@ //! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's //! > State Transition Function (Runtime) Framework. //! -//! ## Documentation +//! //! ## Usage //! -//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). +//! The main intended use of this crate is for it to be imported with its preludes: //! -//! ## WARNING: Experimental +//! ``` +//! # use polkadot_sdk_frame as frame; +//! #[frame::pallet] +//! pub mod pallet { +//! # use polkadot_sdk_frame as frame; +//! use frame::prelude::*; +//! // ^^ using the prelude! //! -//! **This crate and all of its content is experimental, and should not yet be used in production.** +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! } +//! +//! pub mod tests { +//! # use polkadot_sdk_frame as frame; +//! use frame::testing_prelude::*; +//! } +//! +//! pub mod runtime { +//! # use polkadot_sdk_frame as frame; +//! use frame::runtime::prelude::*; +//! } +//! ``` +//! +//! See: [`prelude`], [`testing_prelude`] and [`runtime::prelude`]. +//! +//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. +//! +//! ## Documentation +//! +//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). //! //! ## Underlying dependencies //! @@ -46,9 +76,9 @@ //! In short, this crate only re-exports types and traits from multiple sources. All of these //! sources are listed (and re-exported again) in [`deps`]. //! -//! ## Usage +//! ## WARNING: Experimental //! -//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. +//! 
**This crate and all of its content is experimental, and should not yet be used in production.** #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 22df746d667ab477dc576c6c085bbf1bf1675fb6..afae4652a4ef2ff954d1aa450ebbd453d095adbd 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -17,40 +17,40 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-session = { path = "../session", default-features = false, features = [ +], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-std = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { features = [ "historical", -] } -pallet-authorship = { path = "../authorship", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +], workspace = true } +pallet-authorship = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +frame-election-provider-support = { workspace = true } log = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -rand_chacha = { version = "0.3.1", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +rand_chacha = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } -sp-core = { path = "../../primitives/core" } -sp-npos-elections = { path = "../../primitives/npos-elections" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "reward-curve" } -pallet-bags-list = { path = "../bags-list" } -substrate-test-utils = { path = "../../test-utils" } -frame-benchmarking = { path = "../benchmarking" } -frame-election-provider-support = { path = "../election-provider-support" } -rand_chacha = { version = "0.3.1" } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, 
default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index e2a2782db2da1527eff9d9948020fb72bd02e370..acb819c3169ec105954ea599331d29bb83e4c2cd 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["full", "visit"], workspace = true } [dev-dependencies] -sp-runtime = { path = "../../../primitives/runtime" } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/frame/staking/reward-fn/Cargo.toml b/substrate/frame/staking/reward-fn/Cargo.toml index 5169db5072e2fc80805605c3517d8c6e779e5620..5adbb8382da50c05cf38c2ebd9f952c1330224c8 100644 --- a/substrate/frame/staking/reward-fn/Cargo.toml +++ b/substrate/frame/staking/reward-fn/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } +sp-arithmetic = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 19da2f24ff00e80321e20506b2fe13a0ddd71978..624279624bc8098f61d4f650cd8da151486bdb57 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { default-features = false, path = "../../../primitives/api" } -sp-staking = { default-features = false, path = "../../../primitives/staking" } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-staking = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 8c60dec65a81a123b5d1bd04b3ea8614ae1e9f0f..7e6a87955b08371dddbecc6e17477ed5808173ef 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -124,20 +124,12 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } sp_runtime::impl_opaque_keys! 
{ @@ -269,19 +261,15 @@ impl OnStakingUpdate for EventListenerMock { // Disabling threshold for `UpToLimitDisablingStrategy` pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl crate::pallet::pallet::Config for Test { type Currency = Balances; - type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); type RewardRemainder = RewardRemainderMock; - type RuntimeEvent = RuntimeEvent; - type Slash = (); type Reward = MockReward; type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = EnsureOneOrRoot; - type BondingDuration = BondingDuration; type SessionInterface = Self; type EraPayout = ConvertCurve; type NextNewSession = Session; @@ -296,8 +284,6 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type BenchmarkingConfig = TestBenchmarkingConfig; - type WeightInfo = (); type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 284a801a0f050eb79672feb2e1167e5d14f033a5..a76e47edf38039501fe0ac9d3434e13b3fdafe4c 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -86,9 +86,10 @@ pub mod pallet { Remove, } - #[pallet::config] + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The staking balance. + #[pallet::no_default] type Currency: LockableCurrency< Self::AccountId, Moment = BlockNumberFor, @@ -109,6 +110,7 @@ pub mod pallet { /// /// It is guaranteed to start being called from the first `on_finalize`. Thus value at /// genesis is not used. + #[pallet::no_default] type UnixTime: UnixTime; /// Convert a balance into a number used for election calculation. This must fit into a @@ -117,9 +119,11 @@ pub mod pallet { /// in 128. /// Consequently, the backward convert is used convert the u128s from sp-elections back to a /// [`BalanceOf`]. + #[pallet::no_default_bounds] type CurrencyToVote: sp_staking::currency_to_vote::CurrencyToVote>; /// Something that provides the election functionality. + #[pallet::no_default] type ElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -127,6 +131,7 @@ pub mod pallet { DataProvider = Pallet, >; /// Something that provides the election functionality at genesis. + #[pallet::no_default] type GenesisElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -134,6 +139,7 @@ pub mod pallet { >; /// Something that defines the maximum number of nominations per nominator. + #[pallet::no_default_bounds] type NominationsQuota: NominationsQuota>; /// Number of eras to keep in history. @@ -161,17 +167,21 @@ pub mod pallet { /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default_bounds] type RewardRemainder: OnUnbalanced>; /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for the unbalanced reduction when slashing a staker. + #[pallet::no_default_bounds] type Slash: OnUnbalanced>; /// Handler for the unbalanced increment when rewarding a staker. /// NOTE: in most cases, the implementation of `OnUnbalanced` should modify the total /// issuance. 
+ #[pallet::no_default_bounds] type Reward: OnUnbalanced>; /// Number of sessions per era. @@ -192,6 +202,7 @@ pub mod pallet { /// The origin which can manage less critical staking parameters that does not require root. /// /// Supported actions: (1) cancel deferred slash, (2) set minimum commission. + #[pallet::no_default] type AdminOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. @@ -199,10 +210,12 @@ pub mod pallet { /// The payout for validators and the system for the current era. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default] type EraPayout: EraPayout>; /// Something that can estimate the next session change, accurately or as a best effort /// guess. + #[pallet::no_default_bounds] type NextNewSession: EstimateNextNewSession>; /// The maximum size of each `T::ExposurePage`. @@ -230,6 +243,7 @@ pub mod pallet { /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. /// /// Invariant: what comes out of this list will always be a nominator. + #[pallet::no_default] type VoterList: SortedListProvider; /// WIP: This is a noop as of now, the actual business logic that's described below is going @@ -252,6 +266,7 @@ pub mod pallet { /// validators, they can chill at any point, and their approval stakes will still be /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE /// VALIDATOR. + #[pallet::no_default] type TargetList: SortedListProvider>; /// The maximum number of `unlocking` chunks a [`StakingLedger`] can @@ -274,18 +289,66 @@ pub mod pallet { /// receives. /// /// WARNING: this only reports slashing and withdraw events for the time being. + #[pallet::no_default_bounds] type EventListeners: sp_staking::OnStakingUpdate>; - // `DisablingStragegy` controls how validators are disabled + /// `DisablingStragegy` controls how validators are disabled + #[pallet::no_default_bounds] type DisablingStrategy: DisablingStrategy; /// Some parameters of the benchmarking. + #[cfg(feature = "std")] + type BenchmarkingConfig: BenchmarkingConfig; + + #[cfg(not(feature = "std"))] + #[pallet::no_default] type BenchmarkingConfig: BenchmarkingConfig; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, parameter_types, traits::ConstU32}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + parameter_types! 
{ + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: EraIndex = 3; + } + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type CurrencyBalance = u128; + type CurrencyToVote = (); + type NominationsQuota = crate::FixedNominationsQuota<16>; + type HistoryDepth = ConstU32<84>; + type RewardRemainder = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = (); + type SessionInterface = (); + type NextNewSession = (); + type MaxExposurePageSize = ConstU32<64>; + type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type EventListeners = (); + type DisablingStrategy = crate::UpToLimitDisablingStrategy; + #[cfg(feature = "std")] + type BenchmarkingConfig = crate::TestBenchmarkingConfig; + type WeightInfo = (); + } + } + /// The ideal number of active validators. #[pallet::storage] #[pallet::getter(fn validator_count)] diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 2229eb28329ad07b5eb8742293a3b10eb6bdc905..825b03b8ecaac159e1332a2e73eb5441601f0bf2 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -780,7 +780,7 @@ fn nominators_also_get_slashed_pro_rata() { #[test] fn double_staking_should_fail() { // should test (in the same order): - // * an account already bonded as stash cannot be be stashed again. + // * an account already bonded as stash cannot be stashed again. // * an account already bonded as stash cannot nominate. // * an account already bonded as controller can nominate. ExtBuilder::default().try_state(false).build_and_execute(|| { diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 0870989d81f15df54cec9c41a0fa408edb45af36..8bb4079715e2a99298239160f04d3bc967633c8c 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -15,27 +15,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -thousands = { version = "0.2.0", optional = true } -zstd = { version = "0.12.4", default-features = false, optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -remote-externalities = { package = "frame-remote-externalities", path = "../../utils/frame/remote-externalities", optional = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true } +thousands = { optional = true, workspace = true } +zstd = { optional 
= true, workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +remote-externalities = { optional = true, workspace = true, default-features = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +substrate-state-trie-migration-rpc = { optional = true, workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -tokio = { version = "1.22.0", features = ["macros"] } -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } +parking_lot = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 4ec649f9080d463dcf72dbb96e9a34ce49124af0..22ad640d3bd23d96c43d764dfaadddc3a5b687f3 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -109,7 +109,6 @@ pub mod pallet { MaxEncodedLen, )] #[scale_info(skip_type_params(MaxKeyLen))] - #[codec(mel_bound())] pub enum Progress> { /// Yet to begin. ToStart, @@ -126,7 +125,6 @@ pub mod pallet { /// /// It tracks the last top and child keys read. #[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq, MaxEncodedLen)] - #[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] pub struct MigrationTask { /// The current top trie migration progress. diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 989f0c330fc10b19bde46d92d853fcbdef2b463a..233b7255fc5dbc0e617c53c427252431052cf891 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -15,20 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-statement-store = { path = "../../primitives/statement-store", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-statement-store = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/statement/src/mock.rs 
b/substrate/frame/statement/src/mock.rs index 35d51e7a27bfc82e82d9b0b19220c31816c27e37..34afd332c083ddb4baefd367349a9e37f806838f 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -51,20 +51,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } ord_parameter_types! { diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index fcbb00087e26c7b08a00227c142a2565b2c0c04b..5d5d09d692342746557c3a80190ff6257de37a53 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index a6c4fd6ee309ec4fc07495b59df341b854dfb1a9..560697ab710b1af575c62c93c217f965e3f1d042 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -16,59 +16,59 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", default-features = false } +array-bytes = { workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } -frame-metadata = { version = "16.0.0", default-features = false, features = [ +], workspace = true } +frame-metadata = { features = [ "current", -] } -sp-api = { path = "../../primitives/api", default-features = false, features = [ +], workspace = true } +sp-api = { features = [ "frame-metadata", -] } -sp-std = { path = 
"../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = [ +], workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = [ "serde", -] } -sp-tracing = { path = "../../primitives/tracing", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } -sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false } -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false } -tt-call = "1.0.8" -macro_magic = "0.5.0" -frame-support-procedural = { path = "procedural", default-features = false } -paste = "1.0" -sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true } -bitflags = "1.3" -impl-trait-for-tuples = "0.2.2" -smallvec = "1.11.0" +], workspace = true } +sp-tracing = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } +sp-inherents = { workspace = true } +sp-staking = { workspace = true } +sp-weights = { workspace = true } +sp-debug-derive = { workspace = true } +sp-metadata-ir = { workspace = true } +tt-call = { workspace = true } +macro_magic = { workspace = true } +frame-support-procedural = { workspace = true } +paste = { workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +bitflags = { workspace = true } +impl-trait-for-tuples = { workspace = true } +smallvec = { workspace = true, default-features = true } log = { workspace = true } -sp-crypto-hashing-proc-macro = { path = "../../primitives/crypto/hashing/proc-macro" } -k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } -environmental = { version = "1.1.4", default-features = false } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } +k256 = { features = ["ecdsa"], workspace = true } +environmental = { workspace = true } +sp-genesis-builder = { workspace = true } serde_json = { features = ["alloc"], workspace = true } -docify = "0.2.8" -static_assertions = "1.1.0" +docify = { workspace = true } +static_assertions = { workspace = true, default-features = true } -aquamarine = { version = "0.5.0" } +aquamarine = { workspace = true } [dev-dependencies] -assert_matches = "1.3.0" -pretty_assertions = "1.2.1" -sp-timestamp = { path = "../../primitives/timestamp", default-features = false } -frame-system = { path = "../system" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +sp-timestamp = { workspace = true } +frame-system = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index b04af63de81174d02592c6d7caa51ad902c10ca8..fbb4da0177a4d9622375ef634b41cb688221eadb 100644 
--- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,21 +18,21 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = "0.2.0" -Inflector = "0.11.4" -cfg-expr = "0.15.5" -itertools = "0.11" -proc-macro2 = "1.0.56" +derive-syn-parse = { workspace = true } +Inflector = { workspace = true } +cfg-expr = { workspace = true } +itertools = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } -syn = { features = ["full", "visit-mut"], workspace = true } -frame-support-procedural-tools = { path = "tools" } -macro_magic = { version = "0.5.0", features = ["proc_support"] } -proc-macro-warning = { version = "1.0.0", default-features = false } -expander = "2.0.0" -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing", default-features = false } +syn = { features = ["full", "parsing", "visit-mut"], workspace = true } +frame-support-procedural-tools = { workspace = true, default-features = true } +macro_magic = { features = ["proc_support"], workspace = true } +proc-macro-warning = { workspace = true } +expander = { workspace = true } +sp-crypto-hashing = { workspace = true } [dev-dependencies] -regex = "1" +regex = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index ded77bed4c8e29145b721581aec12eef1dcd952a..532e032d0cb78ac536fcebe16267d1d8c6d2949e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -65,8 +65,6 @@ pub enum RuntimeDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. #[derive(Debug)] pub struct ImplicitRuntimeDeclaration { - pub name: Ident, - pub where_section: Option, pub pallets: Vec, } @@ -98,11 +96,7 @@ impl Parse for RuntimeDeclaration { match convert_pallets(pallets.content.inner.into_iter().collect())? { PalletsConversion::Implicit(pallets) => - Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { - name, - where_section, - pallets, - })), + Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { pallets })), PalletsConversion::Explicit(pallets) => Ok(RuntimeDeclaration::Explicit(ExplicitRuntimeDeclaration { name, @@ -124,9 +118,6 @@ impl Parse for RuntimeDeclaration { #[derive(Debug)] pub struct WhereSection { pub span: Span, - pub block: syn::TypePath, - pub node_block: syn::TypePath, - pub unchecked_extrinsic: syn::TypePath, } impl Parse for WhereSection { @@ -145,10 +136,9 @@ impl Parse for WhereSection { } input.parse::()?; } - let block = remove_kind(input, WhereKind::Block, &mut definitions)?.value; - let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; - let unchecked_extrinsic = - remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; + remove_kind(input, WhereKind::Block, &mut definitions)?; + remove_kind(input, WhereKind::NodeBlock, &mut definitions)?; + remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?; if let Some(WhereDefinition { ref kind_span, ref kind, .. }) = definitions.first() { let msg = format!( "`{:?}` was declared above. 
Please use exactly one declaration for `{:?}`.", @@ -156,7 +146,7 @@ impl Parse for WhereSection { ); return Err(Error::new(*kind_span, msg)) } - Ok(Self { span: input.span(), block, node_block, unchecked_extrinsic }) + Ok(Self { span: input.span() }) } } @@ -171,7 +161,6 @@ pub enum WhereKind { pub struct WhereDefinition { pub kind_span: Span, pub kind: WhereKind, - pub value: syn::TypePath, } impl Parse for WhereDefinition { @@ -187,14 +176,10 @@ impl Parse for WhereDefinition { return Err(lookahead.error()) }; - Ok(Self { - kind_span, - kind, - value: { - let _: Token![=] = input.parse()?; - input.parse()? - }, - }) + let _: Token![=] = input.parse()?; + let _: syn::TypePath = input.parse()?; + + Ok(Self { kind_span, kind }) } } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index e812ac071b2c9a2cf2e8ef199c2e1d656b7f0494..51e5657a2e8be433729a781b339f42b7f80e9bd8 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -81,6 +81,9 @@ fn counter_prefix(prefix: &str) -> String { /// Construct a runtime, with the given name and the given pallets. /// +/// NOTE: A new version of this macro is available at `frame_support::runtime`. This macro will +/// soon be deprecated. Please use the new macro instead. +/// /// The parameters here are specific types for `Block`, `NodeBlock`, and `UncheckedExtrinsic` /// and the pallets that are used by the runtime. /// `Block` is the block type that is used in the runtime and `NodeBlock` is the block type @@ -1188,67 +1191,11 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { .into() } -/// Construct a runtime, with the given name and the given pallets. -/// -/// # Example: /// -/// ```ignore -/// #[frame_support::runtime] -/// mod runtime { -/// // The main runtime -/// #[runtime::runtime] -/// // Runtime Types to be generated -/// #[runtime::derive( -/// RuntimeCall, -/// RuntimeEvent, -/// RuntimeError, -/// RuntimeOrigin, -/// RuntimeFreezeReason, -/// RuntimeHoldReason, -/// RuntimeSlashReason, -/// RuntimeLockId, -/// RuntimeTask, -/// )] -/// pub struct Runtime; -/// -/// #[runtime::pallet_index(0)] -/// pub type System = frame_system; -/// -/// #[runtime::pallet_index(1)] -/// pub type Test = path::to::test; -/// -/// // Pallet with instance. -/// #[runtime::pallet_index(2)] -/// pub type Test2_Instance1 = test2; -/// -/// // Pallet with calls disabled. -/// #[runtime::pallet_index(3)] -/// #[runtime::disable_call] -/// pub type Test3 = test3; -/// -/// // Pallet with unsigned extrinsics disabled. -/// #[runtime::pallet_index(4)] -/// #[runtime::disable_unsigned] -/// pub type Test4 = test4; -/// } -/// ``` -/// -/// # Legacy Ordering -/// -/// An optional attribute can be defined as #[frame_support::runtime(legacy_ordering)] to -/// ensure that the order of hooks is same as the order of pallets (and not based on the -/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. -/// -/// # Note -/// -/// The population of the genesis storage depends on the order of pallets. So, if one of your -/// pallets depends on another pallet, the pallet that is depended upon needs to come before -/// the pallet depending on it. -/// -/// # Type definitions +/// --- /// -/// * The macro generates a type alias for each pallet to their `Pallet`. E.g. 
`type System = -/// frame_system::Pallet` +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::runtime`. #[proc_macro_attribute] pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { runtime::runtime(attr, item) diff --git a/substrate/frame/support/procedural/src/pallet/expand/error.rs b/substrate/frame/support/procedural/src/pallet/expand/error.rs index 72fb6e923572387622ef2ea820dc6931c32468ef..b921d66ff9e73895fef8e1e0985fc205446b60e8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/error.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/error.rs @@ -71,23 +71,25 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { ) ); - let as_str_matches = error.variants.iter().map( - |VariantDef { ident: variant, field: field_ty, docs: _, cfg_attrs }| { - let variant_str = variant.to_string(); - let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); - match field_ty { - Some(VariantField { is_named: true }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) - }, - Some(VariantField { is_named: false }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) => #variant_str,) - }, - None => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) - }, - } - }, - ); + let as_str_matches = + error + .variants + .iter() + .map(|VariantDef { ident: variant, field: field_ty, cfg_attrs }| { + let variant_str = variant.to_string(); + let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); + match field_ty { + Some(VariantField { is_named: true }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) + }, + Some(VariantField { is_named: false }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) => #variant_str,) + }, + None => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) + }, + } + }); let error_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; diff --git a/substrate/frame/support/procedural/src/pallet/parse/composite.rs b/substrate/frame/support/procedural/src/pallet/parse/composite.rs index c3ac74846bf7c664289dab8d046b51370fe28f5f..20fc30cd26b1f5eca394a319e31fee1280978b69 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/composite.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/composite.rs @@ -87,8 +87,6 @@ pub mod keyword { } pub struct CompositeDef { - /// The index of the CompositeDef item in the pallet module. - pub index: usize, /// The composite keyword used (contains span). pub composite_keyword: keyword::CompositeKeyword, /// Name of the associated type. 
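Stepping back to the note added in `substrate/frame/support/procedural/src/lib.rs` above: the deprecation message points `construct_runtime!` users at the new `#[frame_support::runtime]` attribute. As a rough sketch of what that transition looks like (the pallet names, paths and the exact set of derived runtime types here are illustrative, not taken from this diff):

```rust
// A hypothetical two-pallet runtime using the attribute-based syntax that
// replaces `construct_runtime!`.
#[frame_support::runtime]
mod runtime {
	// The main runtime type and the runtime-level enums generated for it.
	#[runtime::runtime]
	#[runtime::derive(
		RuntimeCall,
		RuntimeEvent,
		RuntimeError,
		RuntimeOrigin,
		RuntimeFreezeReason,
		RuntimeHoldReason,
		RuntimeSlashReason,
		RuntimeLockId,
		RuntimeTask
	)]
	pub struct Runtime;

	// Pallets are declared as type aliases with an explicit pallet index.
	#[runtime::pallet_index(0)]
	pub type System = frame_system;

	#[runtime::pallet_index(1)]
	pub type Balances = pallet_balances;
}
```

Unless `legacy_ordering` is requested, hook ordering follows the declared pallet indices rather than the textual order of the declarations.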
@@ -104,7 +102,6 @@ pub struct CompositeDef { impl CompositeDef { pub fn try_from( attr_span: proc_macro2::Span, - index: usize, scrate: &syn::Path, item: &mut syn::Item, ) -> syn::Result { @@ -180,7 +177,6 @@ impl CompositeDef { syn::parse2::(item.ident.to_token_stream())?; Ok(CompositeDef { - index, composite_keyword, attr_span, generics: item.generics.clone(), diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index eaeaab247588052a36d7aac469935b62151fa013..6febaac9ffa3284248f6d22466a1eb350520330d 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -62,8 +62,6 @@ pub struct ConfigDef { pub has_event_type: bool, /// The where clause on trait definition but modified so `Self` is `T`. pub where_clause: Option, - /// The span of the pallet::config attribute. - pub attr_span: proc_macro2::Span, /// Whether a default sub-trait should be generated. /// /// Contains default sub-trait items (instantiated by `#[pallet::config(with_default)]`). @@ -325,7 +323,6 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS impl ConfigDef { pub fn try_from( frame_system: &syn::Path, - attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, enable_default: bool, @@ -484,7 +481,6 @@ impl ConfigDef { consts_metadata, has_event_type, where_clause, - attr_span, default_sub_trait, }) } diff --git a/substrate/frame/support/procedural/src/pallet/parse/error.rs b/substrate/frame/support/procedural/src/pallet/parse/error.rs index 362df8d7340ce0caad72cf85df88378569329672..bc4087a0ea763d5ea6b80f221ab53a50386a8f02 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/error.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/error.rs @@ -16,7 +16,6 @@ // limitations under the License. use super::helper; -use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use syn::{spanned::Spanned, Fields}; @@ -37,8 +36,6 @@ pub struct VariantDef { pub ident: syn::Ident, /// The variant field, if any. pub field: Option, - /// The variant doc literals. - pub docs: Vec, /// The `cfg` attributes. pub cfg_attrs: Vec, } @@ -101,12 +98,7 @@ impl ErrorDef { } let cfg_attrs: Vec = helper::get_item_cfg_attrs(&variant.attrs); - Ok(VariantDef { - ident: variant.ident.clone(), - field: field_ty, - docs: get_doc_literals(&variant.attrs), - cfg_attrs, - }) + Ok(VariantDef { ident: variant.ident.clone(), field: field_ty, cfg_attrs }) }) .collect::>()?; diff --git a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs index 2ba6c44b7d158e20d09a309f31b93fadf2cf8cf8..12a373db180c328b380b42a5fbf1cbec6efe5e81 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -37,8 +37,6 @@ pub struct ExtraConstantsDef { pub where_clause: Option, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, - /// The index of call item in pallet module. - pub index: usize, /// The extra constant defined. 
pub extra_constants: Vec, } @@ -77,7 +75,7 @@ impl syn::parse::Parse for ExtraConstAttr { } impl ExtraConstantsDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -150,11 +148,6 @@ impl ExtraConstantsDef { }); } - Ok(Self { - index, - instances, - where_clause: item.generics.where_clause.clone(), - extra_constants, - }) + Ok(Self { instances, where_clause: item.generics.where_clause.clone(), extra_constants }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs index d0e1d9ec998ec00661ef02eee7f4077a11c8084b..bc925a21c9c8e15ad7f1041843bfdd6d1ebaf164 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Definition for pallet genesis build implementation. pub struct GenesisBuildDef { - /// The index of item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Option>, /// The where_clause used. @@ -31,11 +29,7 @@ pub struct GenesisBuildDef { } impl GenesisBuildDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -56,6 +50,6 @@ impl GenesisBuildDef { let instances = helper::check_genesis_builder_usage(item_trait)?.map(|instances| vec![instances]); - Ok(Self { attr_span, index, instances, where_clause: item.generics.where_clause.clone() }) + Ok(Self { attr_span, instances, where_clause: item.generics.where_clause.clone() }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs index 37d7d22f4b6bb3c60cb8e9c4b9eaffc221ce3b70..07b51c8b91fa8fa17cbaca1fc65b9b2a9914fd21 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Implementation of the pallet hooks. pub struct HooksDef { - /// The index of item in pallet. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, /// The where_clause used. @@ -33,11 +31,7 @@ pub struct HooksDef { } impl HooksDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -77,7 +71,6 @@ impl HooksDef { Ok(Self { attr_span, - index, instances, has_runtime_upgrade, where_clause: item.generics.where_clause.clone(), diff --git a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs index d8641691a40e30c5a006fc5fb7555910cfe3db35..56ebe8e5df433c2b2a1e80e40c10276e2c92bb73 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs @@ -20,14 +20,12 @@ use syn::spanned::Spanned; /// The definition of the pallet inherent implementation. 
pub struct InherentDef { - /// The index of inherent item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, } impl InherentDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -55,6 +53,6 @@ impl InherentDef { helper::check_impl_gen(&item.generics, item.impl_token.span())?, ]; - Ok(InherentDef { index, instances }) + Ok(InherentDef { instances }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 6e12774611ddfd0b4cdf53468df074dc7315e382..f55b166c7917cb019d82bd243539cea6aa0862b3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -109,10 +109,9 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span, with_default)) if config.is_none() => + Some(PalletAttr::Config(_, with_default)) if config.is_none() => config = Some(config::ConfigDef::try_from( &frame_system, - span, index, item, with_default, @@ -122,7 +121,7 @@ impl Def { pallet_struct = Some(p); }, Some(PalletAttr::Hooks(span)) if hooks.is_none() => { - let m = hooks::HooksDef::try_from(span, index, item)?; + let m = hooks::HooksDef::try_from(span, item)?; hooks = Some(m); }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => @@ -162,27 +161,27 @@ impl Def { genesis_config = Some(g); }, Some(PalletAttr::GenesisBuild(span)) if genesis_build.is_none() => { - let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; + let g = genesis_build::GenesisBuildDef::try_from(span, item)?; genesis_build = Some(g); }, Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => - origin = Some(origin::OriginDef::try_from(index, item)?), + origin = Some(origin::OriginDef::try_from(item)?), Some(PalletAttr::Inherent(_)) if inherent.is_none() => - inherent = Some(inherent::InherentDef::try_from(index, item)?), + inherent = Some(inherent::InherentDef::try_from(item)?), Some(PalletAttr::Storage(span)) => storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { - let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; + let v = validate_unsigned::ValidateUnsignedDef::try_from(item)?; validate_unsigned = Some(v); }, Some(PalletAttr::TypeValue(span)) => type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), Some(PalletAttr::ExtraConstants(_)) => extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(extra_constants::ExtraConstantsDef::try_from(item)?), Some(PalletAttr::Composite(span)) => { let composite = - composite::CompositeDef::try_from(span, index, &frame_support, item)?; + composite::CompositeDef::try_from(span, &frame_support, item)?; if composites.iter().any(|def| { match (&def.composite_keyword, &composite.composite_keyword) { ( @@ -722,7 +721,6 @@ impl syn::parse::Parse for PalletAttr { #[derive(Clone)] pub struct InheritedCallWeightAttr { pub typename: syn::Type, - pub span: proc_macro2::Span, } impl syn::parse::Parse for InheritedCallWeightAttr { @@ -744,6 +742,6 @@ impl syn::parse::Parse for InheritedCallWeightAttr { return Err(lookahead.error()) }; - Ok(Self { typename: 
buffer.parse()?, span: input.span() }) + Ok(Self { typename: buffer.parse()? }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/origin.rs b/substrate/frame/support/procedural/src/pallet/parse/origin.rs index 76e2a8841196b9a7c85f220a4698b0661c3c4e11..11311b3d5033c382a9f34a6003f6cba7d70d29d5 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/origin.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/origin.rs @@ -25,16 +25,13 @@ use syn::spanned::Spanned; /// * `struct Origin` /// * `enum Origin` pub struct OriginDef { - /// The index of item in pallet module. - pub index: usize, - pub has_instance: bool, pub is_generic: bool, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, } impl OriginDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item_span = item.span(); let (vis, ident, generics) = match &item { syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), @@ -46,7 +43,6 @@ impl OriginDef { }, }; - let has_instance = generics.params.len() == 2; let is_generic = !generics.params.is_empty(); let mut instances = vec![]; @@ -67,6 +63,6 @@ impl OriginDef { return Err(syn::Error::new(ident.span(), msg)) } - Ok(OriginDef { index, has_instance, is_generic, instances }) + Ok(OriginDef { is_generic, instances }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs index 6405bb415a6f1eed18cab8d4f6c40f8a37f2048b..ed860849a4db438d0cf13a5e866733f3d1936da4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs @@ -34,8 +34,8 @@ use syn::{ parse2, spanned::Spanned, token::{Bracket, Paren, PathSep, Pound}, - Attribute, Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, - PathArguments, Result, TypePath, + Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, PathArguments, + Result, TypePath, }; pub mod keywords { @@ -180,7 +180,6 @@ pub struct TaskDef { pub condition_attr: TaskConditionAttr, pub list_attr: TaskListAttr, pub weight_attr: TaskWeightAttr, - pub normal_attrs: Vec, pub item: ImplItemFn, pub arg_names: Vec, } @@ -190,7 +189,7 @@ impl syn::parse::Parse for TaskDef { let item = input.parse::()?; // we only want to activate TaskAttrType parsing errors for tasks-related attributes, // so we filter them here - let (task_attrs, normal_attrs) = partition_task_attrs(&item); + let task_attrs = partition_task_attrs(&item).0; let task_attrs: Vec = task_attrs .into_iter() @@ -293,15 +292,7 @@ impl syn::parse::Parse for TaskDef { let list_attr = list_attr.try_into().expect("we check the type above; QED"); let weight_attr = weight_attr.try_into().expect("we check the type above; QED"); - Ok(TaskDef { - index_attr, - condition_attr, - list_attr, - weight_attr, - normal_attrs, - item, - arg_names, - }) + Ok(TaskDef { index_attr, condition_attr, list_attr, weight_attr, item, arg_names }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs index 4d9db30b3a788354ab5a650c9780717dbb649ee0..b9c0635bb3f5e1fbac87d68bdd11c782d7659ed1 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs @@ -28,12 +28,8 @@ pub struct 
TypeValueDef { pub ident: syn::Ident, /// The type return by Get. pub type_: Box, - /// The block returning the value to get - pub block: Box, /// If type value is generic over `T` (or `T` and `I` for instantiable pallet) pub is_generic: bool, - /// A set of usage of instance, must be check for consistency with config. - pub instances: Vec, /// The where clause of the function. pub where_clause: Option, /// The span of the pallet::type_value attribute. @@ -90,7 +86,6 @@ impl TypeValueDef { let vis = item.vis.clone(); let ident = item.sig.ident.clone(); - let block = item.block.clone(); let type_ = match item.sig.output.clone() { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { @@ -99,25 +94,11 @@ impl TypeValueDef { }, }; - let mut instances = vec![]; - if let Some(usage) = helper::check_type_value_gen(&item.sig.generics, item.sig.span())? { - instances.push(usage); - } + helper::check_type_value_gen(&item.sig.generics, item.sig.span())?; let is_generic = item.sig.generics.type_params().count() > 0; let where_clause = item.sig.generics.where_clause.clone(); - Ok(TypeValueDef { - attr_span, - index, - is_generic, - vis, - ident, - block, - type_, - instances, - where_clause, - docs, - }) + Ok(TypeValueDef { attr_span, index, is_generic, vis, ident, type_, where_clause, docs }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs index 2bf0a1b6c1886632b0abc52661cded9e5e227ed3..038db0d325813ddfc9badad3789365616b053f3b 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -19,15 +19,10 @@ use super::helper; use syn::spanned::Spanned; /// The definition of the pallet validate unsigned implementation. -pub struct ValidateUnsignedDef { - /// The index of validate unsigned item in pallet module. - pub index: usize, - /// A set of usage of instance, must be check for consistency with config. 
- pub instances: Vec, -} +pub struct ValidateUnsignedDef {} impl ValidateUnsignedDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -52,11 +47,9 @@ impl ValidateUnsignedDef { return Err(syn::Error::new(item.span(), msg)) } - let instances = vec![ - helper::check_pallet_struct_usage(&item.self_ty)?, - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - ]; + helper::check_pallet_struct_usage(&item.self_ty)?; + helper::check_impl_gen(&item.generics, item.impl_token.span())?; - Ok(ValidateUnsignedDef { index, instances }) + Ok(ValidateUnsignedDef {}) } } diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 43f11896808c71aed001a4660f3a73de2825dc8b..3cdfb06cb6eaa65c71777688421d5ef28a77fb11 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -99,14 +99,20 @@ fn construct_runtime_implicit_to_explicit( for pallet in definition.pallet_decls.iter() { let pallet_path = &pallet.path; let pallet_name = &pallet.name; - let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(<#instance>)); + let runtime_param = &pallet.runtime_param; + let pallet_segment_and_instance = match (&pallet.pallet_segment, &pallet.instance) { + (Some(segment), Some(instance)) => quote::quote!(::#segment<#runtime_param, #instance>), + (Some(segment), None) => quote::quote!(::#segment<#runtime_param>), + (None, Some(instance)) => quote::quote!(<#instance>), + (None, None) => quote::quote!(), + }; expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_default_parts_v2 }] your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! { target = [{ #expansion }] - pattern = [{ #pallet_name = #pallet_path #pallet_instance }] + pattern = [{ #pallet_name = #pallet_path #pallet_segment_and_instance }] } } ); diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs index dd83cd0da90a2185ebc5015daadba0a9d00ac438..a3d1c9417df81208a4e7f9e9b0a9e00afe9d1ca4 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs @@ -109,7 +109,6 @@ pub enum AllPalletsDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. #[derive(Debug, Clone)] pub struct ImplicitAllPalletsDeclaration { - pub name: Ident, pub pallet_decls: Vec, pub pallet_count: usize, } @@ -123,7 +122,6 @@ pub struct ExplicitAllPalletsDeclaration { pub struct Def { pub input: TokenStream2, - pub item: syn::ItemMod, pub runtime_struct: runtime_struct::RuntimeStructDef, pub pallets: AllPalletsDeclaration, pub runtime_types: Vec, @@ -161,8 +159,8 @@ impl Def { helper::take_first_item_runtime_attr::(item)? 
{ match runtime_attr { - RuntimeAttr::Runtime(span) if runtime_struct.is_none() => { - let p = runtime_struct::RuntimeStructDef::try_from(span, item)?; + RuntimeAttr::Runtime(_) if runtime_struct.is_none() => { + let p = runtime_struct::RuntimeStructDef::try_from(item)?; runtime_struct = Some(p); }, RuntimeAttr::Derive(_, types) if runtime_types.is_none() => { @@ -189,7 +187,7 @@ impl Def { match *pallet_item.ty.clone() { syn::Type::Path(ref path) => { let pallet_decl = - PalletDeclaration::try_from(item.span(), &pallet_item, path)?; + PalletDeclaration::try_from(item.span(), &pallet_item, &path.path)?; if let Some(used_pallet) = names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) @@ -240,7 +238,6 @@ impl Def { let decl_count = pallet_decls.len(); let pallets = if decl_count > 0 { AllPalletsDeclaration::Implicit(ImplicitAllPalletsDeclaration { - name, pallet_decls, pallet_count: decl_count.saturating_add(pallets.len()), }) @@ -250,7 +247,6 @@ impl Def { let def = Def { input, - item, runtime_struct: runtime_struct.ok_or_else(|| { syn::Error::new(item_span, "Missing Runtime. Please add a struct inside the module and annotate it with `#[runtime::runtime]`" @@ -267,3 +263,24 @@ impl Def { Ok(def) } } + +#[test] +fn runtime_parsing_works() { + let def = Def::try_from(syn::parse_quote! { + #[runtime::runtime] + mod runtime { + #[runtime::derive(RuntimeCall, RuntimeEvent)] + #[runtime::runtime] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + #[runtime::pallet_index(1)] + pub type Pallet1 = pallet1; + } + }) + .expect("Failed to parse runtime definition"); + + assert_eq!(def.runtime_struct.ident, "Runtime"); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index 09f5290541d3a6ac038cd863a96330f10b605c44..ebfd0c9ccceed5156d48c8704cae2eb14a3423b5 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -15,10 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}; +use crate::{ + construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}, + runtime::parse::PalletDeclaration, +}; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments}; +use syn::{punctuated::Punctuated, token, Error}; impl Pallet { pub fn try_from( @@ -55,20 +58,10 @@ impl Pallet { "Invalid pallet declaration, expected a path or a trait object", ))?; - let mut instance = None; - if let Some(segment) = path.inner.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) - { - if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { - args, .. - }) = segment.arguments.clone() - { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = - Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); - segment.arguments = PathArguments::None; - } - } - } + let PalletDeclaration { path: inner, instance, .. 
} = + PalletDeclaration::try_from(attr_span, item, &path.inner)?; + + path = PalletPath { inner }; pallet_parts = pallet_parts .into_iter() @@ -101,3 +94,95 @@ impl Pallet { }) } } + +#[test] +fn pallet_parsing_works() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn pallet_parsing_works_with_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance_and_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs index e167d37d5f14099c1f8f656e9aa5e6e5e44780b5..d34df77b7cfc1c0d11870046a716dea0b512ab2e 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs @@ -15,18 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use quote::ToTokens; -use syn::{spanned::Spanned, Attribute, Ident, PathArguments}; +use syn::{Ident, PathArguments}; /// The declaration of a pallet. #[derive(Debug, Clone)] pub struct PalletDeclaration { /// The name of the pallet, e.g.`System` in `pub type System = frame_system`. pub name: Ident, - /// Optional attributes tagged right above a pallet declaration. - pub attrs: Vec, /// The path of the pallet, e.g. `frame_system` in `pub type System = frame_system`. pub path: syn::Path, + /// The segment of the pallet, e.g. `Pallet` in `pub type System = frame_system::Pallet`. + pub pallet_segment: Option, + /// The runtime parameter of the pallet, e.g. `Runtime` in + /// `pub type System = frame_system::Pallet`. + pub runtime_param: Option, /// The instance of the pallet, e.g. `Instance1` in `pub type Council = /// pallet_collective`. pub instance: Option, @@ -36,26 +38,135 @@ impl PalletDeclaration { pub fn try_from( _attr_span: proc_macro2::Span, item: &syn::ItemType, - path: &syn::TypePath, + path: &syn::Path, ) -> syn::Result { let name = item.ident.clone(); - let mut path = path.path.clone(); + let mut path = path.clone(); + let mut pallet_segment = None; + let mut runtime_param = None; let mut instance = None; if let Some(segment) = path.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) { if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { args, .. }) = segment.arguments.clone() { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = - Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); + if segment.ident == "Pallet" { + let mut segment = segment.clone(); segment.arguments = PathArguments::None; + pallet_segment = Some(segment.clone()); + } + let mut args_iter = args.iter(); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + let ident = arg_path.path.require_ident()?.clone(); + if segment.ident == "Pallet" { + runtime_param = Some(ident); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + instance = Some(arg_path.path.require_ident()?.clone()); + } + } else { + instance = Some(ident); + segment.arguments = PathArguments::None; + } } } } - Ok(Self { name, path, instance, attrs: item.attrs.clone() }) + if pallet_segment.is_some() { + path = syn::Path { + leading_colon: None, + segments: path + .segments + .iter() + .filter(|seg| seg.arguments.is_empty()) + .cloned() + .collect(), + }; + } + + Ok(Self { name, path, pallet_segment, runtime_param, instance }) } } + +#[test] +fn declaration_works() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! 
{ frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn declaration_works_with_pallet() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! { frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = + syn::PathSegment { ident: parse_quote! { Pallet }, arguments: PathArguments::None }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_pallet_and_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! { frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = + syn::PathSegment { ident: parse_quote! { Pallet }, arguments: PathArguments::None }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs index 8fa746ee80727d7164d5dadb0f728fc7991362f7..33c845ee946b56f02df2575de780343bf7c1cddc 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs @@ -18,11 +18,10 @@ use syn::spanned::Spanned; pub struct RuntimeStructDef { pub ident: syn::Ident, - pub attr_span: proc_macro2::Span, } impl RuntimeStructDef { - pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Struct(item) = item { item } else { @@ -30,6 +29,6 @@ impl RuntimeStructDef { return Err(syn::Error::new(item.span(), msg)) }; - Ok(Self { ident: item.ident.clone(), attr_span }) + Ok(Self { ident: item.ident.clone() }) } } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index a75307aca79b6ff241611b49b60c931ee1f83373..f672740e57e85f8ed79407b31c064d5d1b089cce 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "full", "visit"], workspace = true } -frame-support-procedural-tools-derive = { path = "derive" } +frame-support-procedural-tools-derive = { workspace = true, default-features = true } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 
b39d99a822fb7aed533bc7795daa53c903cc2952..2292c2a7c7247f5b58903fd066e9de2ffaf86aeb 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { features = ["proc-macro"], workspace = true } syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 8ae1f56b4d686e6d6aa637aac1f6f2a0973e6157..94f4b9dd4bdcc214de7ec1b02db3e06dc87d3383 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -508,6 +508,29 @@ pub use frame_support_procedural::{ construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; +/// Construct a runtime, with the given name and the given pallets. +/// +/// # Example: +#[doc = docify::embed!("src/tests/runtime.rs", runtime_macro)] +/// +/// # Supported Attributes: +/// +/// ## Legacy Ordering +/// +/// An optional attribute can be defined as #[frame_support::runtime(legacy_ordering)] to +/// ensure that the order of hooks is same as the order of pallets (and not based on the +/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. +/// +/// # Note +/// +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to their `Pallet`. E.g. `type System = +/// frame_system::Pallet` pub use frame_support_procedural::runtime; #[doc(hidden)] @@ -2274,6 +2297,18 @@ pub mod pallet_macros { /// } /// ``` /// + /// ### Value Trait Bounds + /// + /// To use a type as the value of a storage type, be it `StorageValue`, `StorageMap` or + /// anything else, you need to meet a number of trait bound constraints. + /// + /// See: . + /// + /// Notably, all value types need to implement `Encode`, `Decode`, `MaxEncodedLen` and + /// `TypeInfo`, and possibly `Default`, if + /// [`ValueQuery`](frame_support::storage::types::ValueQuery) is used, explained in the + /// next section. + /// /// ### QueryKind /// /// Every storage type mentioned above has a generic type called diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 968639e02d35be13160918619a77041ceca20be6..fa018d743653f2131944d188af7b0dd74057f143 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -17,7 +17,7 @@ use crate::{ defensive, - storage::transactional::with_transaction_opaque_err, + storage::{storage_prefix, transactional::with_transaction_opaque_err}, traits::{ Defensive, GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, SafeMode, StorageVersion, @@ -369,6 +369,118 @@ impl, DbWeight: Get> frame_support::traits } } +/// `RemoveStorage` is a utility struct used to remove a storage item from a specific pallet. +/// +/// This struct is generic over three parameters: +/// - `P` is a type that implements the [`Get`] trait for a static string, representing the pallet's +/// name. +/// - `S` is a type that implements the [`Get`] trait for a static string, representing the storage +/// name. 
+/// - `DbWeight` is a type that implements the [`Get`] trait for [`RuntimeDbWeight`], providing the +/// weight for database operations. +/// +/// On runtime upgrade, the `on_runtime_upgrade` function will clear the storage from the specified +/// storage, logging the number of keys removed. If the `try-runtime` feature is enabled, the +/// `pre_upgrade` and `post_upgrade` functions can be used to verify the storage removal before and +/// after the upgrade. +/// +/// # Examples: +/// ```ignore +/// construct_runtime! { +/// pub enum Runtime +/// { +/// System: frame_system = 0, +/// +/// SomePallet: pallet_something = 1, +/// +/// YourOtherPallets... +/// } +/// }; +/// +/// parameter_types! { +/// pub const SomePallet: &'static str = "SomePallet"; +/// pub const StorageAccounts: &'static str = "Accounts"; +/// pub const StorageAccountCount: &'static str = "AccountCount"; +/// } +/// +/// pub type Migrations = ( +/// RemoveStorage, +/// RemoveStorage, +/// AnyOtherMigrations... +/// ); +/// +/// pub type Executive = frame_executive::Executive< +/// Runtime, +/// Block, +/// frame_system::ChainContext, +/// Runtime, +/// Migrations +/// >; +/// ``` +/// +/// WARNING: `RemoveStorage` has no guard rails preventing it from bricking the chain if the +/// operation of removing storage for the given pallet would exceed the block weight limit. +/// +/// If your storage has too many keys to be removed in a single block, it is advised to wait for +/// a multi-block scheduler currently under development which will allow for removal of storage +/// items (and performing other heavy migrations) over multiple blocks +/// (see ). +pub struct RemoveStorage, S: Get<&'static str>, DbWeight: Get>( + PhantomData<(P, S, DbWeight)>, +); +impl, S: Get<&'static str>, DbWeight: Get> + frame_support::traits::OnRuntimeUpgrade for RemoveStorage +{ + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + let keys_removed = match clear_prefix(&hashed_prefix, None) { + KillStorageResult::AllRemoved(value) => value, + KillStorageResult::SomeRemaining(value) => { + log::error!( + "`clear_prefix` failed to remove all keys for storage `{}` from pallet `{}`. THIS SHOULD NEVER HAPPEN! 
🚨", + S::get(), P::get() + ); + value + }, + } as u64; + + log::info!("Removed `{}` `{}` `{}` keys 🧹", keys_removed, P::get(), S::get()); + + DbWeight::get().reads_writes(keys_removed + 1, keys_removed) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => log::info!("Found `{}` `{}` keys pre-removal 👀", P::get(), S::get()), + false => log::warn!( + "Migration RemoveStorage<{}, {}> can be removed (no keys found pre-removal).", + P::get(), + S::get() + ), + }; + Ok(Default::default()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: sp_std::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => { + log::error!("`{}` `{}` has keys remaining post-removal ❗", P::get(), S::get()); + return Err("Keys remaining post-removal, this should never happen 🚨".into()) + }, + false => log::info!("No `{}` `{}` keys found post-removal 🎉", P::get(), S::get()), + }; + Ok(()) + } +} + /// A migration that can proceed in multiple steps. pub trait SteppedMigration { /// The cursor type that stores the progress (aka. state) of this migration. diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 88afa243f0932e797516192f0fd1495f00cd69bf..34652231e3bce6cf9702b6439fa02e2051dee2a8 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -27,6 +27,7 @@ use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; pub use self::frame_system::{pallet_prelude::*, Config, Pallet}; mod inject_runtime_type; +mod runtime; mod storage_alias; mod tasks; @@ -220,12 +221,25 @@ type Header = generic::Header; type UncheckedExtrinsic = generic::UncheckedExtrinsic; type Block = generic::Block; -crate::construct_runtime!( - pub enum Runtime - { - System: self::frame_system, - } -); +#[crate::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = self::frame_system; +} #[crate::derive_impl(self::frame_system::config_preludes::TestDefaultConfig as self::frame_system::DefaultConfig)] impl Config for Runtime { diff --git a/substrate/frame/support/src/tests/runtime.rs b/substrate/frame/support/src/tests/runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..a9d9281f50da36e988482feac010ab778864d7dc --- /dev/null +++ b/substrate/frame/support/src/tests/runtime.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
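Returning to the `RemoveStorage` utility introduced in `substrate/frame/support/src/migrations.rs` above: it is driven entirely by two `Get<&'static str>` name parameters and a database weight. A minimal sketch of wiring it up, assuming a pallet whose storage prefix is `Example` and a retired storage item named `Retired` (both names hypothetical):

```rust
use frame_support::{
	migrations::RemoveStorage, parameter_types, weights::constants::RocksDbWeight,
};

parameter_types! {
	// The pallet name as used for its storage prefix, and the storage item's name.
	pub const ExamplePalletName: &'static str = "Example";
	pub const RetiredStorageName: &'static str = "Retired";
}

/// Clears every `Example::Retired` key on the next runtime upgrade.
pub type DropRetiredStorage =
	RemoveStorage<ExamplePalletName, RetiredStorageName, RocksDbWeight>;
```

`DropRetiredStorage` would then be listed in the migrations tuple handed to `frame_executive::Executive`, as in the doc example above; the stated weight caveat applies, since the whole prefix is cleared within a single block.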
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{frame_system, Block}; +use crate::derive_impl; + +#[crate::pallet(dev_mode)] +mod pallet_basic { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_basic::Config for Runtime {} + +#[crate::pallet(dev_mode)] +mod pallet_with_disabled_call { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_with_disabled_call::Config for Runtime {} + +#[crate::pallet(dev_mode)] +mod pallet_with_disabled_unsigned { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_with_disabled_unsigned::Config for Runtime {} + +#[crate::pallet] +mod pallet_with_instance { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +#[allow(unused)] +type Instance1 = pallet_with_instance::Pallet; + +impl pallet_with_instance::Config for Runtime {} + +#[allow(unused)] +type Instance2 = pallet_with_instance::Pallet; + +impl pallet_with_instance::Config for Runtime {} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +#[docify::export(runtime_macro)] +#[crate::runtime] +mod runtime { + // The main runtime + #[runtime::runtime] + // Runtime Types to be generated + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + // Use the concrete pallet type + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + // Use path to the pallet + #[runtime::pallet_index(1)] + pub type Basic = pallet_basic; + + // Use the concrete pallet type with instance + #[runtime::pallet_index(2)] + pub type PalletWithInstance1 = pallet_with_instance::Pallet; + + // Use path to the pallet with instance + #[runtime::pallet_index(3)] + pub type PalletWithInstance2 = pallet_with_instance; + + // Ensure that the runtime does not export the calls from the pallet + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type PalletWithDisabledCall = pallet_with_disabled_call::Pallet; + + // Ensure that the runtime does not export the unsigned calls from the pallet + #[runtime::pallet_index(5)] + #[runtime::disable_unsigned] + pub type PalletWithDisabledUnsigned = pallet_with_disabled_unsigned::Pallet; +} diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index ccccc5063286602538c0c46293ffabe8ad01181a..1a687cade79f5973a8b2816d0cba40ef250c4b3a 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -351,6 +351,7 @@ pub trait IntegrityTest { /// - [`crate::traits::misc::OffchainWorker`] /// - [`OnIdle`] /// - [`IntegrityTest`] +/// - [`OnPoll`] /// /// ## Ordering /// @@ 
-363,34 +364,32 @@ pub trait IntegrityTest { /// /// ```mermaid /// graph LR -/// Optional --> BeforeExtrinsics -/// BeforeExtrinsics --> Extrinsics -/// Extrinsics --> AfterExtrinsics -/// subgraph Optional +/// Optional --> Mandatory +/// Mandatory --> ExtrinsicsMandatory +/// ExtrinsicsMandatory --> Poll +/// Poll --> Extrinsics +/// Extrinsics --> AfterMandatory +/// AfterMandatory --> onIdle +/// +/// subgraph Optional /// OnRuntimeUpgrade /// end /// -/// subgraph BeforeExtrinsics +/// subgraph Mandatory /// OnInitialize /// end /// +/// subgraph ExtrinsicsMandatory +/// Inherent1 --> Inherent2 +/// end +/// /// subgraph Extrinsics /// direction TB -/// Inherent1 -/// Inherent2 -/// Extrinsic1 -/// Extrinsic2 -/// -/// Inherent1 --> Inherent2 -/// Inherent2 --> Extrinsic1 /// Extrinsic1 --> Extrinsic2 /// end /// -/// subgraph AfterExtrinsics -/// OnIdle +/// subgraph AfterMandatory /// OnFinalize -/// -/// OnIdle --> OnFinalize /// end /// ``` /// @@ -466,6 +465,8 @@ pub trait Hooks { /// /// Is not guaranteed to execute in a block and should therefore only be used in no-deadline /// scenarios. + /// + /// This is the non-mandatory version of [`Hooks::on_initialize`]. fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {} /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 2eec606b6d18b20a086e4ce7b44a877c3b8fb4d5..3f8d80f5c6baf150db06b6ca24b7f4e69de81539 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -27,7 +27,7 @@ use sp_weights::{Weight, WeightMeter}; /// Errors that can happen when attempting to process a message with /// [`ProcessMessage::process_message()`]. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] pub enum ProcessMessageError { /// The message data format is unknown (e.g. unrecognised header) BadFormat, diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs index 9e467aea4220dd7296a3ccad8236f122ff39e7d8..875ff56bea19e3c9f5770365833092da58fca6f4 100644 --- a/substrate/frame/support/src/traits/storage.rs +++ b/substrate/frame/support/src/traits/storage.rs @@ -194,7 +194,7 @@ where } /// Some sort of cost taken from account temporarily in order to offset the cost to the chain of -/// holding some data [`Footprint`] in state. +/// holding some data `Footprint` (e.g. [`Footprint`]) in state. /// /// The cost may be increased, reduced or dropped entirely as the footprint changes. /// @@ -206,16 +206,20 @@ where /// treated as one*. Don't type to duplicate it, and remember to drop it when you're done with /// it. #[must_use] -pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLen { +pub trait Consideration: + Member + FullCodec + TypeInfo + MaxEncodedLen +{ /// Create a ticket for the `new` footprint attributable to `who`. This ticket *must* ultimately - /// be consumed through `update` or `drop` once the footprint changes or is removed. - fn new(who: &AccountId, new: Footprint) -> Result; + /// be consumed through `update` or `drop` once the footprint changes or is removed. `None` + /// implies no cost for a given footprint. 
+ fn new(who: &AccountId, new: Footprint) -> Result, DispatchError>; /// Optionally consume an old ticket and alter the footprint, enforcing the new cost to `who` - /// and returning the new ticket (or an error if there was an issue). + /// and returning the new ticket (or an error if there was an issue). `None` implies no cost for + /// a given footprint. /// /// For creating tickets and dropping them, you can use the simpler `new` and `drop` instead. - fn update(self, who: &AccountId, new: Footprint) -> Result; + fn update(self, who: &AccountId, new: Footprint) -> Result, DispatchError>; /// Consume a ticket for some `old` footprint attributable to `who` which should now been freed. fn drop(self, who: &AccountId) -> Result<(), DispatchError>; @@ -230,12 +234,12 @@ pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLe } } -impl Consideration for () { - fn new(_: &A, _: Footprint) -> Result { - Ok(()) +impl Consideration for () { + fn new(_: &A, _: F) -> Result, DispatchError> { + Ok(Some(())) } - fn update(self, _: &A, _: Footprint) -> Result<(), DispatchError> { - Ok(()) + fn update(self, _: &A, _: F) -> Result, DispatchError> { + Ok(Some(())) } fn drop(self, _: &A) -> Result<(), DispatchError> { Ok(()) diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs index 8842b20580181f81e4377a0d9f6223e59a6fee6a..138703cf1d135ef50a228e008fae3d37e0c972a1 100644 --- a/substrate/frame/support/src/traits/tokens.rs +++ b/substrate/frame/support/src/traits/tokens.rs @@ -30,8 +30,8 @@ pub use imbalance::Imbalance; pub mod pay; pub use misc::{ AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance, - ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision, - Preservation, Provenance, Restriction, UnityAssetBalanceConversion, UnityOrOuterConversion, - WithdrawConsequence, WithdrawReasons, + ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, IdAmount, Locker, + Precision, Preservation, Provenance, Restriction, UnityAssetBalanceConversion, + UnityOrOuterConversion, WithdrawConsequence, WithdrawReasons, }; pub use pay::{Pay, PayFromAccount, PaymentStatus}; diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index 01c3b9dfe46a562ec5c0692fd51ecc0e53f83872..a113cb01c982dd8f35c9ab28ead961a65219cd71 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -58,13 +58,18 @@ //! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at //! which point the holds will start to come into play. //! -//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a -//! specified block number. +//! - **Frozen Balance**: A freeze on a specified amount of an account's balance. Tokens that are +//! frozen cannot be transferred. //! //! Multiple freezes always operate over the same funds, so they "overlay" rather than //! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its //! funds for any reason down to 100 units, at which point the freezes will start to come into //! play. +//! +//! It's important to note that the frozen balance can exceed the total balance of the account. +//! This is useful, eg, in cases where you want to prevent a user from transferring any fund. In +//! 
such a case, setting the frozen balance to `Balance::MAX` would serve that purpose +//! effectively. //! //! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to //! create or keep an account open. This is to prevent "dust accounts" from filling storage. When @@ -198,31 +203,40 @@ use crate::{ MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, F, R, D))] +#[scale_info(skip_type_params(A, F, R, D, Fp))] #[codec(mel_bound())] -pub struct FreezeConsideration(F::Balance, PhantomData (A, R, D)>) +pub struct FreezeConsideration(F::Balance, PhantomData (A, R, D, Fp)>) where F: MutateFreeze; impl< A: 'static, F: 'static + MutateFreeze, R: 'static + Get, - D: 'static + Convert, - > Consideration for FreezeConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for FreezeConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); - F::increase_frozen(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + F::increase_frozen(&R::get(), who, new)?; + Ok(Some(Self(new, PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); if self.0 > new { F::decrease_frozen(&R::get(), who, self.0 - new)?; } else if new > self.0 { F::increase_frozen(&R::get(), who, new - self.0)?; } - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(new, PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::decrease_frozen(&R::get(), who, self.0).map(|_| ()) @@ -240,31 +254,43 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, F, R, D))] +#[scale_info(skip_type_params(A, F, R, D, Fp))] #[codec(mel_bound())] -pub struct HoldConsideration(F::Balance, PhantomData (A, R, D)>) +pub struct HoldConsideration( + F::Balance, + PhantomData (A, R, D, Fp)>, +) where F: MutateHold; impl< A: 'static, F: 'static + MutateHold, R: 'static + Get, - D: 'static + Convert, - > Consideration for HoldConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for HoldConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); - F::hold(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + F::hold(&R::get(), who, new)?; + Ok(Some(Self(new, PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); if self.0 > new { F::release(&R::get(), who, self.0 - new, BestEffort)?; } else if new > self.0 { F::hold(&R::get(), who, new - self.0)?; } - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(new, PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release(&R::get(), who, self.0, BestEffort).map(|_| ()) @@ -291,22 +317,34 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, Fx, Rx, D))] +#[scale_info(skip_type_params(A, Fx, Rx, D, Fp))] #[codec(mel_bound())] -pub struct LoneFreezeConsideration(PhantomData (A, Fx, Rx, D)>); +pub struct LoneFreezeConsideration(PhantomData (A, Fx, Rx, D, Fp)>); impl< A: 'static, Fx: 'static + MutateFreeze, Rx: 'static + Get, - D: 
'static + Convert, - > Consideration for LoneFreezeConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for LoneFreezeConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { ensure!(Fx::balance_frozen(&Rx::get(), who).is_zero(), DispatchError::Unavailable); - Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) + let new = D::convert(footprint); + if new.is_zero() { + Ok(None) + } else { + Fx::set_frozen(&Rx::get(), who, new, Polite).map(|_| Some(Self(PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { - Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + let new = D::convert(footprint); + let _ = Fx::set_frozen(&Rx::get(), who, new, Polite)?; + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { Fx::thaw(&Rx::get(), who).map(|_| ()) @@ -330,22 +368,34 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, Fx, Rx, D))] +#[scale_info(skip_type_params(A, Fx, Rx, D, Fp))] #[codec(mel_bound())] -pub struct LoneHoldConsideration(PhantomData (A, Fx, Rx, D)>); +pub struct LoneHoldConsideration(PhantomData (A, Fx, Rx, D, Fp)>); impl< A: 'static, F: 'static + MutateHold, R: 'static + Get, - D: 'static + Convert, - > Consideration for LoneHoldConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for LoneHoldConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { ensure!(F::balance_on_hold(&R::get(), who).is_zero(), DispatchError::Unavailable); - F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) + let new = D::convert(footprint); + if new.is_zero() { + Ok(None) + } else { + F::set_on_hold(&R::get(), who, new).map(|_| Some(Self(PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { - F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + let new = D::convert(footprint); + let _ = F::set_on_hold(&R::get(), who, new)?; + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release_all(&R::get(), who, BestEffort).map(|_| ()) diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index 424acb1d550b15b69582d004e2e466e65e6a9b3f..e1ff1e058ae7aabd8ce9e0725b51f292c406a1a5 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -17,7 +17,7 @@ //! Miscellaneous types. -use crate::traits::Contains; +use crate::{traits::Contains, TypeInfo}; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; @@ -357,3 +357,12 @@ impl> GetSalary for ConvertRank { C::convert(rank) } } + +/// An identifier and balance. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct IdAmount { + /// An identifier for this item. + pub id: Id, + /// Some amount for this item. 
+ pub amount: Balance, +} diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 6e861ad769cf71d0c1200b2039f28217a558b335..697e14ae1d38893428f19f41aec441c19cfd1711 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -15,29 +15,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -static_assertions = "1.1.0" +static_assertions = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-state-machine = { path = "../../../primitives/state-machine", optional = true } -frame-support = { path = "..", default-features = false, features = ["experimental"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false } -sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false } -trybuild = { version = "1.0.88", features = ["diff"] } -pretty_assertions = "1.3.0" -rustversion = "1.0.6" -frame-system = { path = "../../system", default-features = false } -frame-executive = { path = "../../executive", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-metadata = { features = ["current"], workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-state-machine = { optional = true, workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true } +frame-benchmarking = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-version = { workspace = true } +sp-metadata-ir = { workspace = true } +trybuild = { features = ["diff"], workspace = true } +pretty_assertions = { workspace = true } +rustversion = { workspace = true } +frame-system = { workspace = true } +frame-executive = { workspace = true } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message -test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false } +test-pallet = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index d6e0c66261a993ba70413d89d70dabc107177e1d..60896ca2a10f25401b853e5e297dbbdd14635a8b 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../..", default-features = false } -frame-system = { path = "../../../system", default-features = false } -sp-core = { path = "../../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } -sp-version = { path = "../../../../primitives/version", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-version = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 8607339a2b054530404499c23a402c2a673e2b5a..cee0eac6f1bcd2a54337d97a968fceab76320133 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } -frame-support = { path = "../..", default-features = false } -frame-system = { path = "../../../system", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 5b97db60c00bb7c8b5b40dfa9f823fcc27b73d95..8447cc12ef20cac6058d2666971d8501627e0a64 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -15,9 +15,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +frame = { features = ["experimental", "runtime"], workspace = true } +scale-info = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 5b74cc172c6eb8ed057284ef9dd4c2f91fbf8d7c..6330a138e2f2a66a5b763ecd57d1c742fa092465 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -340,7 +340,7 @@ mod runtime { pub type Module1_9 = module1; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] 
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index a2a8970814b0a7c7c2c3f5ba9536371236c16bbd..3d056c894b92f49c40201fb56de08b92b4c2244d 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -cfg-if = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +cfg-if = { workspace = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -frame-support = { path = "../support", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } -sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.8" +frame-support = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } +sp-version = { features = ["serde"], workspace = true } +sp-weights = { features = ["serde"], workspace = true } +docify = { workspace = true } [dev-dependencies] -criterion = "0.5.1" -sp-externalities = { path = "../../primitives/externalities" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +criterion = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 022f0ffce6b5ee23168db0ccaad1da5ea767ddf3..47a6721b93f585bf3b5c0f6a777327ade4f38be6 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "..", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } 
+frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-io = { path = "../../../primitives/io" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-version = { path = "../../../primitives/version" } +sp-io = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 8b71ca2a13952d4aa1bf983a30d17d3126189524..901a035b6476c7af2e13013a141f3dcafa391a4a 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false } -docify = "0.2.0" +codec = { workspace = true } +sp-api = { workspace = true } +docify = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 148dfd4aad471b8a51aa3106581531b17090c20a..31fdbba5950e21ceecb8b29ae50934d52f194903 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -28,6 +28,9 @@ use sp_runtime::{ /// Check for transaction mortality. /// +/// The extension adds [`Era`] to every signed extrinsic. It also contributes to the signed data, by +/// including the hash of the block at [`Era::birth`]. +/// /// # Transaction Validity /// /// The extension affects `longevity` of the transaction according to the [`Era`] definition. diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 5d6c68989ed53bfec6d95f80f5c9b149aa4c3063..d4705f200efddacd18149651acf8ed431ca722a8 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{limits::BlockWeights, Config, DispatchClass, Pallet, LOG_TARGET}; +use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, @@ -106,8 +106,7 @@ where let all_weight = Pallet::::block_weight(); let maximum_weight = T::BlockWeights::get(); let next_weight = - calculate_consumed_weight::(&maximum_weight, all_weight, info)?; - check_combined_proof_size::(info, &maximum_weight, next_len, &next_weight)?; + calculate_consumed_weight::(&maximum_weight, all_weight, info, len)?; Self::check_extrinsic_weight(info)?; crate::AllExtrinsicsLen::::put(next_len); @@ -130,36 +129,6 @@ where } } -/// Check that the combined extrinsic length and proof size together do not exceed the PoV limit. -pub fn check_combined_proof_size( - info: &DispatchInfoOf, - maximum_weight: &BlockWeights, - next_len: u32, - next_weight: &crate::ConsumedWeight, -) -> Result<(), TransactionValidityError> -where - Call: Dispatchable, -{ - // This extra check ensures that the extrinsic length does not push the - // PoV over the limit. 
- let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64); - if total_pov_size > maximum_weight.max_block.proof_size() { - log::debug!( - target: LOG_TARGET, - "Extrinsic exceeds total pov size. Still including if mandatory. size: {}kb, limit: {}kb, is_mandatory: {}", - total_pov_size as f64/1024.0, - maximum_weight.max_block.proof_size() as f64/1024.0, - info.class == DispatchClass::Mandatory - ); - return match info.class { - // Allow mandatory extrinsics - DispatchClass::Mandatory => Ok(()), - _ => Err(InvalidTransaction::ExhaustsResources.into()), - }; - } - Ok(()) -} - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. /// /// Upon successes, it returns the new block weight as a `Result`. @@ -167,12 +136,16 @@ pub fn calculate_consumed_weight( maximum_weight: &BlockWeights, mut all_weight: crate::ConsumedWeight, info: &DispatchInfoOf, + len: usize, ) -> Result where Call: Dispatchable, { - let extrinsic_weight = - info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic); + // Also Consider extrinsic length as proof weight. + let extrinsic_weight = info + .weight + .saturating_add(maximum_weight.get(info.class).base_extrinsic) + .saturating_add(Weight::from_parts(0, len as u64)); let limit_per_class = maximum_weight.get(info.class); // add the weight. If class is unlimited, use saturating add instead of checked one. @@ -772,168 +745,115 @@ mod tests { &maximum_weight, all_weight.clone(), &mandatory1, + 0 )); assert_err!( calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, all_weight, &mandatory2, + 0 ), InvalidTransaction::ExhaustsResources ); } #[test] - fn maximum_proof_size_includes_length() { + fn proof_size_includes_length() { let maximum_weight = BlockWeights::builder() .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_parts(20, 10)); + w.max_total = Some(Weight::from_parts(20, 1000)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_parts(5, 10)); - w.max_total = None; + w.max_total = Some(Weight::from_parts(20, 1000)); }) .build_or_panic(); + let all_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(5, 0), + DispatchClass::Operational => Weight::from_parts(5, 0), + DispatchClass::Mandatory => Weight::from_parts(0, 0), + }); - assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + let normal = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Normal, + ..Default::default() + }; - let info = DispatchInfo { class: DispatchClass::Normal, ..Default::default() }; - let mandatory = DispatchInfo { class: DispatchClass::Mandatory, ..Default::default() }; - // We have 10 reftime and 5 proof size left over. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); + let mandatory = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Mandatory, + ..Default::default() + }; - // Simple checks for the length - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using 0 length extrinsics. 
+ let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, + all_weight.clone(), + &normal, 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + ) + .unwrap(); + + assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.weight); + + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 6, - &next_weight - )); + 0, + ) + .unwrap(); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.weight); - // We have 10 reftime and 0 proof size left over. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 10), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using non zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 1, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + normal.weight.add_proof_size(100) ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, - &maximum_weight, - 1, - &next_weight - )); - // We have 10 reftime and 2 proof size left over. - // Used weight is spread across dispatch classes this time. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 3), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 2, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 3, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 3, - &next_weight - )); + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + mandatory.weight.add_proof_size(100) + ); - // Ref time is over the limit. Should not happen, but we should make sure that it is - // ignored. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(30, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using oversized zero length extrinsics. 
+ let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 2000, ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); + + // Using oversized zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 6, - &next_weight - )); + all_weight.clone(), + &mandatory, + 2000, + ); + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); } } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 93ce09611b55dc49746ad5e99fa98b21cf25ea6d..2da6e2d5a95d222853e7eeca372c770de84f77ea 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -17,24 +17,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false, optional = true } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-storage = { path = "../../primitives/storage", default-features = false } -sp-timestamp = { path = "../../primitives/timestamp", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { optional = true, workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-timestamp = { workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/timestamp/src/benchmarking.rs b/substrate/frame/timestamp/src/benchmarking.rs index 82dfdfa8b312065aededecd7e81404c19403fe24..d8c27b4967af9a59a6a1711b8aa53726d4968fd2 100644 --- a/substrate/frame/timestamp/src/benchmarking.rs +++ b/substrate/frame/timestamp/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_support::{ensure, traits::OnFinalize}; use frame_system::RawOrigin; use sp_storage::TrackedStorageKey; -use crate::Pallet as Timestamp; +use crate::{Now, Pallet as Timestamp}; const MAX_TIME: u32 = 100; @@ -42,7 +42,7 @@ benchmarks! 
{ }); }: _(RawOrigin::None, t.into()) verify { - ensure!(Timestamp::::now() == t.into(), "Time was not set."); + ensure!(Now::::get() == t.into(), "Time was not set."); } on_finalize { diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 5269f17eca6b2151a787f91a1126c822aaf83879..6a22ab1cd5ef8fde0d94791e82750f46629a2340 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -202,7 +202,6 @@ pub mod pallet { /// The current time for the current block. #[pallet::storage] - #[pallet::getter(fn now)] pub type Now = StorageValue<_, T::Moment, ValueQuery>; /// Whether the timestamp has been updated in this block. @@ -261,7 +260,7 @@ pub mod pallet { pub fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { ensure_none(origin)?; assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); - let prev = Self::now(); + let prev = Now::::get(); assert!( prev.is_zero() || now >= prev + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" @@ -296,7 +295,7 @@ pub mod pallet { .expect("Timestamp inherent data must be provided"); let data = (*inherent_data).saturated_into::(); - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + let next_time = cmp::max(data, Now::::get() + T::MinimumPeriod::get()); Some(Call::set { now: next_time }) } @@ -317,7 +316,7 @@ pub mod pallet { .expect("Timestamp inherent data not correctly encoded") .expect("Timestamp inherent data must be provided"); - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + let minimum = (Now::::get() + T::MinimumPeriod::get()).saturated_into::(); if t > *(data + MAX_TIMESTAMP_DRIFT_MILLIS) { Err(InherentError::TooFarInFuture) } else if t < minimum { @@ -339,7 +338,7 @@ impl Pallet { /// NOTE: if this function is called prior to setting the timestamp, /// it will return the timestamp of the previous block. pub fn get() -> T::Moment { - Self::now() + Now::::get() } /// Set the timestamp to something in particular. Only used for tests. @@ -356,7 +355,7 @@ impl Time for Pallet { type Moment = T::Moment; fn now() -> Self::Moment { - Self::now() + Now::::get() } } @@ -367,7 +366,7 @@ impl UnixTime for Pallet { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. - let now = Self::now(); + let now = Now::::get(); sp_std::if_std! 
{ if now == T::Moment::zero() { log::error!( diff --git a/substrate/frame/timestamp/src/tests.rs b/substrate/frame/timestamp/src/tests.rs index cc49d8a3296e831ae01fd60ab28c0cf7fafbf9b5..a83855561889f162c0787400e700ef6464fb854f 100644 --- a/substrate/frame/timestamp/src/tests.rs +++ b/substrate/frame/timestamp/src/tests.rs @@ -25,7 +25,7 @@ fn timestamp_works() { new_test_ext().execute_with(|| { crate::Now::::put(46); assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); - assert_eq!(Timestamp::now(), 69); + assert_eq!(crate::Now::::get(), 69); assert_eq!(Some(69), get_captured_moment()); }); } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index bcd54461406ead0f1bd67ab427c023d91b63fa0e..abebaa19aedaea145ad31be7d6af4abc46aad4dd 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-storage = { path = "../../primitives/storage" } +pallet-balances = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 8c360fb57d72488553529ab4264eb7f8b444c064..a75aed89d0b44866aee5453743f8865894660dc3 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -169,6 +169,9 @@ pub mod pallet { /// update weights file when altering this method. type Tippers: SortedMembers + ContainsLengthBound; + /// Handler for the unbalanced decrease when slashing for a removed tip. + type OnSlash: OnUnbalanced>; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -177,7 +180,6 @@ pub mod pallet { /// This has the insecure enumerable hash function since the key itself is already /// guaranteed to be a secure hash. #[pallet::storage] - #[pallet::getter(fn tips)] pub type Tips, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -189,7 +191,6 @@ pub mod pallet { /// Simple preimage lookup from the reason's hash to the original data. 
Again, has an /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. #[pallet::storage] - #[pallet::getter(fn reasons)] pub type Reasons, I: 'static = ()> = StorageMap<_, Identity, T::Hash, Vec, OptionQuery>; @@ -489,6 +490,18 @@ pub mod pallet { impl, I: 'static> Pallet { // Add public immutables and private mutables. + /// Access tips storage from outside + pub fn tips( + hash: T::Hash, + ) -> Option, BlockNumberFor, T::Hash>> { + Tips::::get(hash) + } + + /// Access reasons storage from outside + pub fn reasons(hash: T::Hash) -> Option> { + Reasons::::get(hash) + } + /// The account ID of the treasury pot. /// /// This actually does computation. If you need to keep using it, then make sure you cache the diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 78df3736815a11dcc8a766e6eb88c4972157a7b3..7e4a9368ad0c6abdf8930195e32fb982a162c994 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; @@ -105,7 +94,6 @@ impl ContainsLengthBound for TenToFourteen { } } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -116,13 +104,8 @@ parameter_types! { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -143,13 +126,8 @@ impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId2; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. 
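The `#[pallet::getter]` removals above (for `pallet-timestamp`'s `Now` and `pallet-tips`' `Tips`/`Reasons`) all follow the same pattern: callers now read the storage type directly, or go through an explicit accessor such as the `tips()`/`reasons()` functions added above. A minimal sketch of what call sites look like after the change, assuming a runtime `T` that implements the relevant `Config` traits; the helper function names below are illustrative and not part of this diff:

```rust
use sp_std::vec::Vec;

// Illustrative only: reading the storage items once the getters are gone.
fn tip_reason<T: pallet_tips::Config>(hash: T::Hash) -> Option<Vec<u8>> {
	// Through the explicit accessor added in this diff ...
	pallet_tips::Pallet::<T>::reasons(hash)
	// ... or by reading the storage map directly:
	// pallet_tips::Reasons::<T>::get(hash)
}

fn current_time<T: pallet_timestamp::Config>() -> T::Moment {
	// `Timestamp::now()` no longer exists; `Now` is read directly instead.
	pallet_timestamp::Now::<T>::get()
}
```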
@@ -180,6 +158,7 @@ impl Config for Test { type DataDepositPerByte = ConstU64<1>; type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; + type OnSlash = (); type WeightInfo = (); } @@ -192,6 +171,7 @@ impl Config for Test { type DataDepositPerByte = ConstU64<1>; type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; + type OnSlash = (); type WeightInfo = (); } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 4f7da9ae46fabe7b4dcb92bd40eab2ea339175a8..3e890a8f7b69f53b80376b78bb014a8c57726301 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 177621d9adbd102369fe0466cf962441f7591e42..02ce212deb6a7b4982996945f1604ccd03ed4f29 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -17,21 +17,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } -pallet-transaction-payment = { path = "..", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-transaction-payment = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } 
[dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-storage = { path = "../../../primitives/storage", default-features = false } -pallet-assets = { path = "../../assets" } -pallet-balances = { path = "../../balances" } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-storage = { workspace = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index cc43cffd7deba11d252875d3b981e0a43eeafb1e..3f8c7bc0ea34d297dcb8ace6cb25735d9e1592a6 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -98,20 +98,10 @@ parameter_types! { pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index a4a8efad869c84ef9fe8b905ae5c38a9ed94b614..1b92ecf53c0266606b3653911e305c4f069c1b72 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -17,29 +17,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-transaction-payment = { path = "..", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -sp-storage = { path = 
"../../../primitives/storage", default-features = false } +sp-storage = { workspace = true } -pallet-assets = { path = "../../assets" } -pallet-authorship = { path = "../../authorship" } -pallet-balances = { path = "../../balances" } +pallet-assets = { workspace = true, default-features = true } +pallet-authorship = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index fce712c3eba31f8a2816eb23fe221c2c7ce0c9bd..e84df1e4eb91b67a1cdb1477a6f5eac9b1b5180f 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -81,20 +81,10 @@ parameter_types! { pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 2c9f814460f7cd1502779cc1645c0379611dc3d5..d2fb92a6bf3459beda62aeac800d379a897636d5 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -16,12 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-rpc = { path = "../../../primitives/rpc" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-weights = { path = "../../../primitives/weights" } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 6c0241ec5c03c4ceb757620d6ae4f528e379c74e..1a384c74b31c2f7964ef1383e25c44fdf479509d 100644 --- a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = 
false, features = ["derive"] } -pallet-transaction-payment = { path = "../..", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } -sp-weights = { path = "../../../../primitives/weights", default-features = false } +codec = { features = ["derive"], workspace = true } +pallet-transaction-payment = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml index 4d32a5123cf3fc0ef1322cedca7d2c8ef65ba51a..7265979cedc0c3737df51a24718e9b19d6cf57e9 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 00391d79478c72694dd269540006ceee8e19a063..682fb320356166bdd16b068b156e3315344b3a44 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -16,8 +16,8 @@ //! # Skip Feeless Payment Pallet //! //! This pallet allows runtimes that include it to skip payment of transaction fees for -//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@ -//! frame_support::pallet_prelude::feeless_if`). +//! dispatchables marked by +//! [`#[pallet::feeless_if]`](frame_support::pallet_prelude::feeless_if). //! //! ## Overview //! @@ -30,8 +30,9 @@ //! ## Integration //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets -//! in your `construct_runtime` macro and include this pallet's -//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. +//! in your [`construct_runtime`](frame_support::construct_runtime) macro and +//! include this pallet's [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the +//! existing one as an argument. 
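The rewritten docs above describe wrapping the existing transaction payment signed extension rather than replacing it. As a rough illustration only (not taken from this diff), a runtime's signed-extension tuple might compose the two as below, assuming a `Runtime` type produced by `construct_runtime` and the usual system extensions:

```rust
// Illustrative fragment: `SkipCheckIfFeeless` wraps the inner fee-charging
// extension and only bypasses it for calls whose `#[pallet::feeless_if]`
// condition evaluates to true.
pub type SignedExtra = (
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);
```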
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 7b731eeb825013325159f3bfe9fcaeeca11a2024..fa61572e9831f6487cb4c19a97737475776c55d1 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -21,7 +21,7 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{fungible, ConstU64, Imbalance, OnUnbalanced}, + traits::{fungible, Imbalance, OnUnbalanced}, weights::{Weight, WeightToFee as WeightToFeeT}, }; use frame_system as system; @@ -73,20 +73,9 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index bf647ca13ec1cf355e35bc9307239979ba8c8460..3bb97e0cd01562c439028f373339398b6ef4e2f1 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +array-bytes = { optional = true, workspace = true, default-features = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-transaction-storage-proof = { workspace = true } log = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true } +sp-core = { workspace = true } +sp-transaction-storage-proof = { default-features = true, workspace = true } 
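Several mock runtimes in this diff (asset-conversion-tx-payment, asset-tx-payment, transaction-payment, treasury, tx-pause, uniques, utility, vesting) switch to deriving the balances test defaults instead of listing every associated type. Schematically the pattern is the sketch below, assuming `use frame_support::{derive_impl, traits::ConstU64};` as in those mocks; only the associated types that differ from `pallet_balances::config_preludes::TestDefaultConfig` still need to be written out:

```rust
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    // Everything not listed here (Balance, DustRemoval, MaxLocks, hold and
    // freeze types, ...) comes from the default test config.
    type AccountStore = System;
    // Override the existential deposit only where a test depends on it.
    type ExistentialDeposit = ConstU64<10>;
}
```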
[features] default = ["std"] diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index 398cb350c501ee18dd25073b4047b5045b69fa49..6e58ee3e585abd5362bfd5f19d9a616c1b4d7ee7 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -159,11 +159,11 @@ pub mod pallet { fn on_initialize(n: BlockNumberFor) -> Weight { // Drop obsolete roots. The proof for `obsolete` will be checked later // in this block, so we drop `obsolete` - 1. - let period = >::get(); + let period = StoragePeriod::::get(); let obsolete = n.saturating_sub(period.saturating_add(One::one())); if obsolete > Zero::zero() { - >::remove(obsolete); - >::remove(obsolete); + Transactions::::remove(obsolete); + ChunkCount::::remove(obsolete); } // 2 writes in `on_initialize` and 2 writes + 2 reads in `on_finalize` T::DbWeight::get().reads_writes(2, 4) @@ -171,21 +171,21 @@ pub mod pallet { fn on_finalize(n: BlockNumberFor) { assert!( - >::take() || { + ProofChecked::::take() || { // Proof is not required for early or empty blocks. - let number = >::block_number(); - let period = >::get(); + let number = frame_system::Pallet::::block_number(); + let period = StoragePeriod::::get(); let target_number = number.saturating_sub(period); - target_number.is_zero() || >::get(target_number) == 0 + target_number.is_zero() || ChunkCount::::get(target_number) == 0 }, "Storage proof must be checked once in the block" ); // Insert new transactions - let transactions = >::take(); + let transactions = BlockTransactions::::take(); let total_chunks = transactions.last().map_or(0, |t| t.block_chunks); if total_chunks != 0 { - >::insert(n, total_chunks); - >::insert(n, transactions); + ChunkCount::::insert(n, total_chunks); + Transactions::::insert(n, transactions); } } } @@ -215,11 +215,11 @@ pub mod pallet { let content_hash = sp_io::hashing::blake2_256(&data); let extrinsic_index = - >::extrinsic_index().ok_or(Error::::BadContext)?; + frame_system::Pallet::::extrinsic_index().ok_or(Error::::BadContext)?; sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash); let mut index = 0; - >::mutate(|transactions| { + BlockTransactions::::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { return Err(Error::::TooManyTransactions) } @@ -253,17 +253,17 @@ pub mod pallet { index: u32, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; - let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; + let transactions = Transactions::::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; let extrinsic_index = - >::extrinsic_index().ok_or(Error::::BadContext)?; + frame_system::Pallet::::extrinsic_index().ok_or(Error::::BadContext)?; Self::apply_fee(sender, info.size)?; sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; - >::mutate(|transactions| { + BlockTransactions::::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { return Err(Error::::TooManyTransactions) } @@ -297,15 +297,15 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { ensure_none(origin)?; ensure!(!ProofChecked::::get(), Error::::DoubleCheck); - let number = >::block_number(); - let period = >::get(); + let number = frame_system::Pallet::::block_number(); + let period = StoragePeriod::::get(); let target_number = number.saturating_sub(period); 
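The transaction-storage hunks here replace the older `<Storage<T>>::get()` spelling and the `#[pallet::getter]` attributes with direct `StorageItem::<T>::get()` calls plus explicit public accessors. A minimal sketch of that shape, using illustrative names rather than the pallet's own:

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // Public storage item with no `#[pallet::getter]`: readers either use the
    // type directly (`ExampleFee::<T>::get()`) or go through the accessor.
    #[pallet::storage]
    pub type ExampleFee<T: Config> = StorageValue<_, u32>;

    impl<T: Config> Pallet<T> {
        /// Explicit replacement for the removed getter.
        pub fn example_fee() -> Option<u32> {
            ExampleFee::<T>::get()
        }
    }
}
```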
ensure!(!target_number.is_zero(), Error::::UnexpectedProof); - let total_chunks = >::get(target_number); + let total_chunks = ChunkCount::::get(target_number); ensure!(total_chunks != 0, Error::::UnexpectedProof); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks); - let (info, chunk_index) = match >::get(target_number) { + let (info, chunk_index) = match Transactions::::get(target_number) { Some(infos) => { let index = match infos .binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) @@ -349,8 +349,7 @@ pub mod pallet { /// Collection of transaction metadata by block number. #[pallet::storage] - #[pallet::getter(fn transaction_roots)] - pub(super) type Transactions = StorageMap< + pub type Transactions = StorageMap< _, Blake2_128Concat, BlockNumberFor, @@ -360,32 +359,30 @@ pub mod pallet { /// Count indexed chunks for each block. #[pallet::storage] - pub(super) type ChunkCount = + pub type ChunkCount = StorageMap<_, Blake2_128Concat, BlockNumberFor, u32, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn byte_fee)] /// Storage fee per byte. - pub(super) type ByteFee = StorageValue<_, BalanceOf>; + pub type ByteFee = StorageValue<_, BalanceOf>; #[pallet::storage] - #[pallet::getter(fn entry_fee)] /// Storage fee per transaction. - pub(super) type EntryFee = StorageValue<_, BalanceOf>; + pub type EntryFee = StorageValue<_, BalanceOf>; /// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD` /// for block authoring. #[pallet::storage] - pub(super) type StoragePeriod = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type StoragePeriod = StorageValue<_, BlockNumberFor, ValueQuery>; // Intermediates #[pallet::storage] - pub(super) type BlockTransactions = + pub type BlockTransactions = StorageValue<_, BoundedVec, ValueQuery>; /// Was the proof checked in this block? #[pallet::storage] - pub(super) type ProofChecked = StorageValue<_, bool, ValueQuery>; + pub type ProofChecked = StorageValue<_, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -407,9 +404,9 @@ pub mod pallet { #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - >::put(&self.byte_fee); - >::put(&self.entry_fee); - >::put(&self.storage_period); + ByteFee::::put(&self.byte_fee); + EntryFee::::put(&self.entry_fee); + StoragePeriod::::put(&self.storage_period); } } @@ -439,6 +436,21 @@ pub mod pallet { } impl Pallet { + /// Get transaction storage information from outside of this pallet. + pub fn transaction_roots( + block: BlockNumberFor, + ) -> Option> { + Transactions::::get(block) + } + /// Get ByteFee storage information from outside of this pallet. + pub fn byte_fee() -> Option> { + ByteFee::::get() + } + /// Get EntryFee storage information from outside of this pallet. 
+ pub fn entry_fee() -> Option> { + EntryFee::::get() + } + fn apply_fee(sender: T::AccountId, size: u32) -> DispatchResult { let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index 621f74804eccae4b06f2d1f96e238334d038e68d..b725990e6e1212f073a1756ad9c6dc71134fc355 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -40,9 +40,9 @@ fn discards_data() { vec![0u8; 2000 as usize] )); let proof_provider = || { - let block_num = >::block_number(); + let block_num = frame_system::Pallet::::block_number(); if block_num == 11 { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); Some( build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]]) .unwrap(), @@ -92,7 +92,7 @@ fn checks_proof() { vec![0u8; MAX_DATA_SIZE as usize] )); run_to_block(10, || None); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); assert_noop!( @@ -100,7 +100,7 @@ fn checks_proof() { Error::::UnexpectedProof, ); run_to_block(11, || None); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap(); assert_noop!( @@ -132,9 +132,9 @@ fn renews_data() { )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2); let proof_provider = || { - let block_num = >::block_number(); + let block_num = frame_system::Pallet::::block_number(); if block_num == 11 || block_num == 16 { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap()) } else { None diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index c93272af11d4664495cac40984f99fbc60b23f11..33e74b0a4aa282699e2a278ab6359b22df563c39 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -16,26 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -docify = "0.2.8" -impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +docify = { workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support 
= { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-core = { optional = true, workspace = true } [dev-dependencies] -sp-io = { path = "../../primitives/io" } -pallet-utility = { path = "../utility" } -sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/treasury/README.md b/substrate/frame/treasury/README.md index 4945d79d1429694a1c8db79e8508561f515163da..2bd58a9817aab8efb47dc83752b817d6e11a59a2 100644 --- a/substrate/frame/treasury/README.md +++ b/substrate/frame/treasury/README.md @@ -26,6 +26,14 @@ and use the funds to pay developers. ### Dispatchable Functions General spending/proposal protocol: -- `propose_spend` - Make a spending proposal and stake the required deposit. -- `reject_proposal` - Reject a proposal, slashing the deposit. -- `approve_proposal` - Accept the proposal, returning the deposit. +- `spend_local` - Propose and approve a spend of treasury funds, enables the + creation of spends using the native currency of the chain, utilizing the funds + stored in the pot +- `spend` - Propose and approve a spend of treasury funds, allows spending any + asset kind managed by the treasury +- `remove_approval` - Force a previously approved proposal to be removed from + the approval queue +- `payout` - Claim a spend +- `check_status` - Check the status of the spend and remove it from the storage + if processed +- `void_spend` - Void previously approved spend diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index 0b9999e37fbea566e7ec74f61357104b9b5d6376..63978c94e682f8cca84146e69c4b2e17d06ae637 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -59,12 +59,12 @@ where const SEED: u32 = 0; -// Create the pre-requisite information needed to create a treasury `propose_spend`. +// Create the pre-requisite information needed to create a treasury `spend_local`. fn setup_proposal, I: 'static>( u: u32, ) -> (T::AccountId, BalanceOf, AccountIdLookupOf) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); + let value: BalanceOf = T::Currency::minimum_balance() * 100u32.into(); let _ = T::Currency::make_free_balance_be(&caller, value); let beneficiary = account("beneficiary", u, SEED); let beneficiary_lookup = T::Lookup::unlookup(beneficiary); @@ -73,12 +73,10 @@ fn setup_proposal, I: 'static>( // Create proposals that are approved for use in `on_initialize`. 
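With `propose_spend`, `approve_proposal` and `reject_proposal` removed, both the README list above and the reworked benchmarks and tests below route local payouts through `spend_local` alone. A test-style sketch of the new flow, reusing the conventions of this diff's treasury mock (signed origin 14 with a 500 spend limit, beneficiary account 3) and assuming the mock's usual imports:

```rust
#[test]
fn spend_local_awards_on_spend_period() {
    ExtBuilder::default().build().execute_with(|| {
        Balances::make_free_balance_be(&Treasury::account_id(), 101);

        // A single dispatch proposes and approves the spend; there is no
        // proposer bond and no separate approval step any more.
        assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3));

        // The payout is made once the spend period elapses.
        <Treasury as OnInitialize<u64>>::on_initialize(2);
        assert_eq!(Balances::free_balance(3), 100);
    });
}
```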
fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { + let origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; for i in 0..n { - let (caller, value, lookup) = setup_proposal::(i); - #[allow(deprecated)] - Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; - let proposal_id = >::get() - 1; - Approvals::::try_append(proposal_id).unwrap(); + let (_, value, lookup) = setup_proposal::(i); + Treasury::::spend_local(origin.clone(), value, lookup)?; } ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) @@ -126,71 +124,13 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn propose_spend() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), value, beneficiary_lookup); - - Ok(()) - } - - #[benchmark] - fn reject_proposal() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - let reject_origin = - T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(reject_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - - #[benchmark] - fn approve_proposal( - p: Linear<0, { T::MaxApprovals::get() - 1 }>, - ) -> Result<(), BenchmarkError> { - let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - create_approved_proposals::(p)?; - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - - #[extrinsic_call] - _(approve_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - #[benchmark] fn remove_approval() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; + let origin = + T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let (_, value, beneficiary_lookup) = setup_proposal::(SEED); + Treasury::::spend_local(origin, value, beneficiary_lookup)?; let proposal_id = Treasury::::proposal_count() - 1; - Approvals::::try_append(proposal_id).unwrap(); let reject_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 1ccd8456643233c71eb3095aa7c05980b9e52e25..b437e5ef137e3e72bf39c10223a94a67dc31b91c 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -205,9 +205,6 @@ pub mod pallet { /// The staking balance. type Currency: Currency + ReservableCurrency; - /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; - /// Origin from which rejections must come. 
type RejectOrigin: EnsureOrigin; @@ -215,22 +212,6 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. - type OnSlash: OnUnbalanced>; - - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - #[pallet::constant] - type ProposalBond: Get; - - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMinimum: Get>; - - /// Maximum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMaximum: Get>>; - /// Period between successive spends. #[pallet::constant] type SpendPeriod: Get>; @@ -363,14 +344,10 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// New proposal. - Proposed { proposal_index: ProposalIndex }, /// We have ended a spend period and will now allocate funds. Spending { budget_remaining: BalanceOf }, /// Some funds have been allocated. Awarded { proposal_index: ProposalIndex, award: BalanceOf, account: T::AccountId }, - /// A proposal was rejected; funds were slashed. - Rejected { proposal_index: ProposalIndex, slashed: BalanceOf }, /// Some of our funds have been burnt. Burnt { burnt_funds: BalanceOf }, /// Spending has finished; this is the amount that rolls over until next spend. @@ -408,8 +385,6 @@ pub mod pallet { /// Error for the treasury pallet. #[pallet::error] pub enum Error { - /// Proposer's balance is too low. - InsufficientProposersBalance, /// No proposal, bounty or spend at that index. InvalidIndex, /// Too many approvals in the queue. @@ -476,123 +451,6 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - /// Put forward a suggestion for spending. - /// - /// ## Dispatch Origin - /// - /// Must be signed. - /// - /// ## Details - /// A deposit proportional to the value is reserved and slashed if the proposal is rejected. - /// It is returned once the proposal is awarded. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Proposed`] if successful. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::propose_spend())] - #[allow(deprecated)] - #[deprecated( - note = "`propose_spend` will be removed in February 2024. Use `spend` instead." - )] - pub fn propose_spend( - origin: OriginFor, - #[pallet::compact] value: BalanceOf, - beneficiary: AccountIdLookupOf, - ) -> DispatchResult { - let proposer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - let bond = Self::calculate_bond(value); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - let c = Self::proposal_count(); - >::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); - - Self::deposit_event(Event::Proposed { proposal_index: c }); - Ok(()) - } - - /// Reject a proposed spend. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::RejectOrigin`]. - /// - /// ## Details - /// The original deposit will be slashed. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Rejected`] if successful. 
- #[pallet::call_index(1)] - #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`reject_proposal` will be removed in February 2024. Use `spend` instead." - )] - pub fn reject_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::RejectOrigin::ensure_origin(origin)?; - - let proposal = - >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; - let value = proposal.bond; - let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - - Self::deposit_event(Event::::Rejected { - proposal_index: proposal_id, - slashed: value, - }); - Ok(()) - } - - /// Approve a proposal. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::ApproveOrigin`]. - /// - /// ## Details - /// - /// At a later time, the proposal will be allocated to the beneficiary and the original - /// deposit will be returned. - /// - /// ### Complexity - /// - O(1). - /// - /// ## Events - /// - /// No events are emitted from this dispatch. - #[pallet::call_index(2)] - #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`approve_proposal` will be removed in February 2024. Use `spend` instead." - )] - pub fn approve_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::ApproveOrigin::ensure_origin(origin)?; - - ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::try_append(proposal_id) - .map_err(|_| Error::::TooManyApprovals)?; - Ok(()) - } - /// Propose and approve a spend of treasury funds. /// /// ## Dispatch Origin @@ -794,7 +652,7 @@ pub mod pallet { /// /// ## Dispatch Origin /// - /// Must be signed. + /// Must be signed /// /// ## Details /// @@ -934,15 +792,6 @@ impl, I: 'static> Pallet { T::PalletId::get().into_account_truncating() } - /// The needed bond for a proposal whose spend is `value`. - fn calculate_bond(value: BalanceOf) -> BalanceOf { - let mut r = T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value); - if let Some(m) = T::ProposalBondMaximum::get() { - r = r.min(m); - } - r - } - /// Spend some money! returns number of approvals before spend. pub fn spend_funds() -> Weight { let mut total_weight = Weight::zero(); diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 67d81cb5c30224e68a2b72bcda3255273304b14a..97b735928192b4fef3dd9ed8cb8f3e060ce333d7 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -60,20 +60,10 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_utility::Config for Test { @@ -136,7 +126,6 @@ impl Pay for TestPay { } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -152,6 +141,7 @@ impl frame_support::traits::EnsureOrigin for TestSpendOrigin { frame_system::RawOrigin::Signed(11) => Ok(10), frame_system::RawOrigin::Signed(12) => Ok(20), frame_system::RawOrigin::Signed(13) => Ok(50), + frame_system::RawOrigin::Signed(14) => Ok(500), r => Err(RuntimeOrigin::from(r)), }) } @@ -174,13 +164,8 @@ impl> ConversionFromAssetBalance for MulBy { impl Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -295,56 +280,12 @@ fn minting_works() { }); } -#[test] -fn spend_proposal_takes_min_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - Error::::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -365,112 +306,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - 
Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -484,14 +326,7 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 150, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -512,26 +347,12 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), Treasury::pot(), 3)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -554,22 +375,9 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - 
Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 99, 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -611,26 +419,12 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); } // One too many will fail - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, + Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3), Error::::TooManyApprovals ); }); @@ -641,14 +435,8 @@ fn remove_already_removed_approval_fails() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + assert_eq!(Treasury::approvals(), vec![0]); assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); @@ -982,11 +770,9 @@ fn check_status_works() { fn try_state_proposals_invariant_1_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); assert_eq!(ProposalCount::::get(), 1); // Check invariant 1 holds @@ -1005,12 +791,11 @@ fn try_state_proposals_invariant_1_works() { fn try_state_proposals_invariant_2_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); + assert_eq!(Approvals::::get().len(), 1); let current_proposal_count = ProposalCount::::get(); assert_eq!(current_proposal_count, 1); // Check invariant 2 holds @@ -1035,17 +820,10 @@ fn try_state_proposals_invariant_2_works() { fn try_state_proposals_invariant_3_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 10, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 10, 3)); + 
assert_eq!(Proposals::::iter().count(), 1); - // Approve the proposal - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); assert_eq!(Approvals::::get().len(), 1); // Check invariant 3 holds assert!(Approvals::::get() diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 82277e2d28f6c80e2bfa040103bb4a311ca41ac1..8c9c6eb1d0fbb552bb210be6435e973078d64d30 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -52,9 +52,6 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_treasury`. pub trait WeightInfo { fn spend_local() -> Weight; - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal(p: u32, ) -> Weight; fn remove_approval() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; fn spend() -> Weight; @@ -81,50 +78,8 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. - Weight::from_parts(25_484_000, 1489) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_325_000, 3593) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. 
- Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` @@ -232,50 +187,8 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. - Weight::from_parts(25_484_000, 1489) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_325_000, 3593) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. 
- Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index e4e5f1940b25b67a97ef6026daa13a83cab791a4..16e815975b5440000a0bc0808205e2323865adaa 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -15,11 +15,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index e44bb90dd7f8438673f7b5a23b1e7b1c15da03b5..fbb71513c6ae7e59595a8b06a39c62d5db2ebecf 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -15,24 +15,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -pallet-proxy 
= { path = "../proxy" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index f42d4cb58a2ac77b843f7624723d414216cb26da..84ce45e835280dd9beeddacf84193ff858c3ea80 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -36,24 +36,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 65b727b40b254567d46bff7704b4f124b78820f0..d41d322bb63f1a9e72915549107f673a340c039d 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 9fd7f87e159bb29bb7206fcba05b573ccc140115..c3b74eb8c255417b7a6ed12f2d2ee771ec3d9f48 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl 
pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 00e8be75a3de600eada40f33afd4af94156dd554..0ccdac6be6236ef68f528f5c24f8904a7dba1670 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -16,22 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-root-testing = { path = "../root-testing" } -pallet-collective = { path = "../collective" } -pallet-timestamp = { path = "../timestamp" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 9bcbec99f3b441669eaf68433f0e88b3f5117502..eb2047aac28af40e9d8872c38ed21b89666e1c48 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -151,20 +151,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_root_testing::Config for Test { diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 7372b84240364aff99bf4ecbe35929b219f8b50e..3aca5683465cd39e6873cb123135312b8c3ca74b 100644 --- 
a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 311590873d95f84f36d25b907f9fcac8c399106c..68214c4f47ccca2749074bc6e75cc8d218b2e3f7 100644 --- a/substrate/frame/vesting/src/benchmarking.rs +++ b/substrate/frame/vesting/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::assert_ok; use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; -use super::*; +use super::{Vesting as VestingStorage, *}; use crate::Pallet as Vesting; const SEED: u32 = 0; @@ -291,7 +291,7 @@ benchmarks! { "Vesting balance should equal sum locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); @@ -304,7 +304,7 @@ benchmarks! { ); let expected_index = (s - 2) as usize; assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule ); assert_eq!( @@ -313,7 +313,7 @@ benchmarks! { "Vesting balance should equal total locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); @@ -344,7 +344,7 @@ benchmarks! { "Vesting balance should reflect that we are half way through all schedules duration", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); @@ -359,12 +359,12 @@ benchmarks! 
{ ); let expected_index = (s - 2) as usize; assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule, "New schedule is properly created and placed" ); assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule ); assert_eq!( @@ -373,7 +373,7 @@ benchmarks! { "Vesting balance should equal half total locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); @@ -404,7 +404,7 @@ force_remove_vesting_schedule { }: _(RawOrigin::Root, target_lookup, schedule_index) verify { assert_eq!( - Vesting::::vesting(&target).unwrap().len(), + VestingStorage::::get(&target).unwrap().len(), schedule_index as usize, "Schedule count should reduce by 1" ); diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index 4101caded4180b25c9dbdc8ecfda295900ddc0cf..3868f1a8592e95c69aa2fd09b8a201b869016fba 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -200,7 +200,6 @@ pub mod pallet { /// Information regarding the vesting of a given account. #[pallet::storage] - #[pallet::getter(fn vesting)] pub type Vesting = StorageMap< _, Blake2_128Concat, @@ -419,7 +418,7 @@ pub mod pallet { let schedule1_index = schedule1_index as usize; let schedule2_index = schedule2_index as usize; - let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(&who).ok_or(Error::::NotVesting)?; let merge_action = VestingAction::Merge { index1: schedule1_index, index2: schedule2_index }; @@ -464,6 +463,14 @@ pub mod pallet { } impl Pallet { + // Public function for accessing vesting storage + pub fn vesting( + account: T::AccountId, + ) -> Option, BlockNumberFor>, MaxVestingSchedulesGet>> + { + Vesting::::get(account) + } + // Create a new `VestingInfo`, based off of two other `VestingInfo`s. // NOTE: We assume both schedules have had funds unlocked up through the current block. fn merge_vesting_info( @@ -622,7 +629,7 @@ impl Pallet { /// Unlock any vested funds of `who`. fn do_vest(who: T::AccountId) -> DispatchResult { - let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(&who).ok_or(Error::::NotVesting)?; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), VestingAction::Passive)?; @@ -687,7 +694,7 @@ where /// Get the amount that is currently being vested and cannot be transferred out of this account. fn vesting_balance(who: &T::AccountId) -> Option> { - if let Some(v) = Self::vesting(who) { + if let Some(v) = Vesting::::get(who) { let now = T::BlockNumberProvider::current_block_number(); let total_locked_now = v.iter().fold(Zero::zero(), |total, schedule| { schedule.locked_at::(now).saturating_add(total) @@ -726,7 +733,7 @@ where return Err(Error::::InvalidScheduleParams.into()) }; - let mut schedules = Self::vesting(who).unwrap_or_default(); + let mut schedules = Vesting::::get(who).unwrap_or_default(); // NOTE: we must push the new schedule so that `exec_action` // will give the correct new locked amount. @@ -764,7 +771,7 @@ where /// Remove a vesting schedule for a given account. 
fn remove_vesting_schedule(who: &T::AccountId, schedule_index: u32) -> DispatchResult { - let schedules = Self::vesting(who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(who).ok_or(Error::::NotVesting)?; let remove_action = VestingAction::Remove { index: schedule_index as usize }; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), remove_action)?; diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index 674a6f6e2a83677eae30fae445c05def44741e12..f0954a5b989c8a20d1fbb7feba9e49b20d61a27c 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -15,10 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::{ - derive_impl, parameter_types, - traits::{ConstU32, WithdrawReasons}, -}; +use frame_support::{derive_impl, parameter_types, traits::WithdrawReasons}; use sp_runtime::{traits::Identity, BuildStorage}; use super::*; @@ -41,20 +38,10 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type AccountStore = System; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; - type MaxLocks = ConstU32<10>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index 2e1e41fc9578fbb8e7e0b990f0b91c9c13a86402..004da0dfbfa137be6b3f3e5c5894ebdb5a36e992 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -65,9 +65,9 @@ fn check_vesting_status() { 64, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule + assert_eq!(VestingStorage::::get(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule // Account 1 has only 128 units vested from their illiquid ED * 5 units at block 1 assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); @@ -110,7 +110,7 @@ fn check_vesting_status_for_multi_schedule_account() { 10, ); // Account 2 already has a vesting schedule. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Account 2's free balance is from sched0. let free_balance = Balances::free_balance(&2); @@ -128,7 +128,7 @@ fn check_vesting_status_for_multi_schedule_account() { let free_balance = Balances::free_balance(&2); assert_eq!(free_balance, ED * (10 + 20)); // The most recently added schedule exists. 
- assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); // sched1 has free funds at block #1, but nothing else. assert_eq!(Vesting::vesting_balance(&2), Some(free_balance - sched1.per_block())); @@ -171,7 +171,7 @@ fn check_vesting_status_for_multi_schedule_account() { assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Since we have not called any extrinsics that would unlock funds the schedules // are still in storage, - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); // but once we unlock the funds, they are removed from storage. vest_and_assert_no_vesting::(2); }); @@ -207,7 +207,7 @@ fn vested_balance_should_transfer_with_multi_sched() { let sched0 = VestingInfo::new(5 * ED, 128, 0); assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); // Total 10*ED locked for all the schedules. - assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![sched0, sched0]); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 3840); // Account 1 has free balance @@ -245,7 +245,7 @@ fn vested_balance_should_transfer_using_vest_other_with_multi_sched() { let sched0 = VestingInfo::new(5 * ED, 128, 0); assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); // Total of 10*ED of locked for all the schedules. - assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![sched0, sched0]); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 3840); // Account 1 has free balance @@ -305,7 +305,7 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { 64, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); + assert_eq!(VestingStorage::::get(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 can still send liquid funds assert_ok!(Balances::transfer_allow_death(Some(12).into(), 3, 256 * 5)); @@ -320,7 +320,7 @@ fn vested_transfer_works() { assert_eq!(user3_free_balance, 256 * 30); assert_eq!(user4_free_balance, 256 * 40); // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. let new_vesting_schedule = VestingInfo::new( 256 * 5, @@ -329,7 +329,7 @@ fn vested_transfer_works() { ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + assert_eq!(VestingStorage::::get(&4).unwrap(), vec![new_vesting_schedule]); // Ensure the transfer happened correctly. let user3_free_balance_updated = Balances::free_balance(&3); assert_eq!(user3_free_balance_updated, 256 * 25); @@ -368,7 +368,7 @@ fn vested_transfer_correctly_fails() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Fails due to too low transfer amount. let new_vesting_schedule_too_low = @@ -450,7 +450,7 @@ fn force_vested_transfer_works() { assert_eq!(user3_free_balance, ED * 30); assert_eq!(user4_free_balance, ED * 40); // Account 4 should not have any vesting yet. 
- assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. let new_vesting_schedule = VestingInfo::new( ED * 5, @@ -469,8 +469,8 @@ fn force_vested_transfer_works() { new_vesting_schedule )); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap()[0], new_vesting_schedule); - assert_eq!(Vesting::vesting(&4).unwrap().len(), 1); + assert_eq!(VestingStorage::::get(&4).unwrap()[0], new_vesting_schedule); + assert_eq!(VestingStorage::::get(&4).unwrap().len(), 1); // Ensure the transfer happened correctly. let user3_free_balance_updated = Balances::free_balance(&3); assert_eq!(user3_free_balance_updated, ED * 25); @@ -508,7 +508,7 @@ fn force_vested_transfer_correctly_fails() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Too low transfer amount. let new_vesting_schedule_too_low = @@ -594,12 +594,12 @@ fn merge_schedules_that_have_not_started() { ED, // Vest over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); assert_eq!(Balances::usable_balance(&2), 0); // Add a schedule that is identical to the one that already exists. assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched0)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched0]); assert_eq!(Balances::usable_balance(&2), 0); assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); @@ -610,7 +610,7 @@ fn merge_schedules_that_have_not_started() { sched0.per_block() * 2, 10, // Starts at the block the schedules are merged/ ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched1]); assert_eq!(Balances::usable_balance(&2), 0); }); @@ -626,7 +626,7 @@ fn merge_ongoing_schedules() { ED, // Vest over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 10, @@ -634,7 +634,7 @@ fn merge_ongoing_schedules() { sched0.starting_block() + 5, // Start at block 15. ); assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); // Got to half way through the second schedule where both schedules are actively vesting. let cur_block = 20; @@ -666,7 +666,7 @@ fn merge_ongoing_schedules() { let sched2_per_block = sched2_locked / sched2_duration; let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, cur_block); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2]); // And just to double check, we assert the new merged schedule we be cleaned up as expected. System::set_block_number(30); @@ -696,7 +696,7 @@ fn merging_shifts_other_schedules_index() { ); // Account 3 starts out with no schedules, - assert_eq!(Vesting::vesting(&3), None); + assert_eq!(VestingStorage::::get(&3), None); // and some usable balance. 
let usable_balance = Balances::usable_balance(&3); assert_eq!(usable_balance, 30 * ED); @@ -710,7 +710,7 @@ fn merging_shifts_other_schedules_index() { assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched2)); // With no schedules vested or merged they are in the order they are created - assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&3).unwrap(), vec![sched0, sched1, sched2]); // and the usable balance has not changed. assert_eq!(usable_balance, Balances::usable_balance(&3)); @@ -731,7 +731,7 @@ fn merging_shifts_other_schedules_index() { let sched3 = VestingInfo::new(sched3_locked, sched3_per_block, sched3_start); // The not touched schedule moves left and the new merged schedule is appended. - assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched1, sched3]); + assert_eq!(VestingStorage::::get(&3).unwrap(), vec![sched1, sched3]); // The usable balance hasn't changed since none of the schedules have started. assert_eq!(Balances::usable_balance(&3), usable_balance); }); @@ -748,7 +748,7 @@ fn merge_ongoing_and_yet_to_be_started_schedules() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Fast forward to half way through the life of sched1. let mut cur_block = @@ -800,7 +800,7 @@ fn merge_ongoing_and_yet_to_be_started_schedules() { let sched2_per_block = sched2_locked / sched2_duration; let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, sched2_start); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2]); }); } @@ -815,7 +815,7 @@ fn merge_finished_and_ongoing_schedules() { ED, // Vesting over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 40, @@ -834,7 +834,7 @@ fn merge_finished_and_ongoing_schedules() { assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched2)); // The schedules are in expected order prior to merging. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); // Fast forward to sched0's end block. let cur_block = sched0.ending_block_as_balance::(); @@ -849,7 +849,7 @@ fn merge_finished_and_ongoing_schedules() { // sched2 is now the first, since sched0 & sched1 get filtered out while "merging". // sched1 gets treated like the new merged schedule by getting pushed onto back // of the vesting schedules vec. Note: sched0 finished at the current block. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2, sched1]); // sched0 has finished, so its funds are fully unlocked. let sched0_unlocked_now = sched0.locked(); @@ -877,7 +877,7 @@ fn merge_finishing_schedules_does_not_create_a_new_one() { ED, // 20 block duration. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Create sched1 and transfer it to account 2. 
let sched1 = VestingInfo::new( @@ -886,7 +886,7 @@ fn merge_finishing_schedules_does_not_create_a_new_one() { 10, ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); let all_scheds_end = sched0 .ending_block_as_balance::() @@ -919,7 +919,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { ED, // 20 block duration. 10, // Ends at block 30 ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 30, @@ -927,7 +927,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { 35, ); assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); let sched2 = VestingInfo::new( ED * 40, @@ -936,7 +936,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { ); // Add a 3rd schedule to demonstrate how sched1 shifts. assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched2)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); System::set_block_number(30); @@ -951,7 +951,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { // sched0 is removed since it finished, and sched1 is removed and then pushed on the back // because it is treated as the merged schedule - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2, sched1]); // The usable balance is updated because merging fully unlocked sched0. assert_eq!(Balances::usable_balance(&2), sched0.locked()); @@ -967,7 +967,7 @@ fn merge_schedules_throws_proper_errors() { ED, // 20 block duration. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Account 2 only has 1 vesting schedule. assert_noop!( @@ -976,12 +976,12 @@ fn merge_schedules_throws_proper_errors() { ); // Account 4 has 0 vesting schedules. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); assert_noop!(Vesting::merge_schedules(Some(4).into(), 0, 1), Error::::NotVesting); // There are enough schedules to merge but an index is non-existent. 
Vesting::vested_transfer(Some(3).into(), 2, sched0).unwrap(); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched0]); assert_noop!( Vesting::merge_schedules(Some(2).into(), 0, 2), Error::::ScheduleIndexOutOfBounds @@ -1014,17 +1014,17 @@ fn generates_multiple_schedules_from_genesis_config() { .build() .execute_with(|| { let user1_sched1 = VestingInfo::new(5 * ED, 128, 0u64); - assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_sched1]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![user1_sched1]); let user2_sched1 = VestingInfo::new(1 * ED, 12, 10u64); let user2_sched2 = VestingInfo::new(2 * ED, 25, 10u64); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_sched1, user2_sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_sched1, user2_sched2]); let user12_sched1 = VestingInfo::new(1 * ED, 12, 10u64); let user12_sched2 = VestingInfo::new(2 * ED, 25, 10u64); let user12_sched3 = VestingInfo::new(3 * ED, 38, 10u64); assert_eq!( - Vesting::vesting(&12).unwrap(), + VestingStorage::::get(&12).unwrap(), vec![user12_sched1, user12_sched2, user12_sched3] ); }); @@ -1162,7 +1162,7 @@ fn remove_vesting_schedule() { assert_eq!(Balances::free_balance(&3), 256 * 30); assert_eq!(Balances::free_balance(&4), 256 * 40); // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. let new_vesting_schedule = VestingInfo::new( ED * 5, @@ -1171,7 +1171,7 @@ fn remove_vesting_schedule() { ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + assert_eq!(VestingStorage::::get(&4).unwrap(), vec![new_vesting_schedule]); // Account 4 has 5 * 256 locked. assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); // Verify only root can call. @@ -1183,7 +1183,7 @@ fn remove_vesting_schedule() { // Appropriate storage is cleaned up. assert!(!>::contains_key(4)); // Check the vesting balance is zero. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Verifies that trying to remove a schedule when it doesnt exist throws error. 
assert_noop!( Vesting::force_remove_vesting_schedule(RawOrigin::Root.into(), 4, 0), diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 61bbb278019de8b4c012c460ab4d836cdf8a556d..922f9124d913a1b81c9142f9822635753eec728c 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -15,20 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +pallet-balances = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index 6fb8711057ef0cd53cc388e07cf90347c0e13000..0a97d1c2df5445223a4253cf0e1b81bd020806b0 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_whitelist; -use frame_support::{construct_runtime, derive_impl, traits::ConstU64}; +use frame_support::{construct_runtime, derive_impl}; use frame_system::EnsureRoot; use sp_runtime::BuildStorage; @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_preimage::Config for Test { diff --git a/substrate/kitchensink_runtime.wasm b/substrate/kitchensink_runtime.wasm new file mode 100644 index 0000000000000000000000000000000000000000..7ebb14371243afa4956a107374b27a4e686f0360 Binary files /dev/null and b/substrate/kitchensink_runtime.wasm differ diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f48480f398d00729a5fb10e0c9bcfba5d62f9776..f8cbad8e31f02d38ee5a218d107968f634420a67 100644 --- 
a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -16,26 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-api-proc-macro = { path = "proc-macro", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-externalities = { path = "../externalities", default-features = false, optional = true } -sp-version = { path = "../version", default-features = false } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true } -sp-trie = { path = "../trie", default-features = false, optional = true } -hash-db = { version = "0.16.0", optional = true } +codec = { workspace = true } +sp-api-proc-macro = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-externalities = { optional = true, workspace = true } +sp-version = { workspace = true } +sp-state-machine = { optional = true, workspace = true } +sp-trie = { optional = true, workspace = true } +hash-db = { optional = true, workspace = true, default-features = true } thiserror = { optional = true, workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } -sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } +], workspace = true } +sp-metadata-ir = { optional = true, workspace = true } log = { workspace = true } +docify = { workspace = true } [dev-dependencies] -sp-test-primitives = { path = "../test-primitives" } +sp-test-primitives = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index b1bc547f3e4ae82e4e4331643c0aef14e1df3451..7d7fc19fcf5beb4274074a7aed49fa7e75147bd9 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -21,14 +21,14 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } -proc-macro2 = "1.0.56" -blake2 = { version = "0.10.4", default-features = false } -proc-macro-crate = "3.0.0" -expander = "2.0.0" -Inflector = "0.11.4" +proc-macro2 = { workspace = true } +blake2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } +Inflector = { workspace = true } [dev-dependencies] -assert_matches = "1.3.0" +assert_matches = { workspace = true } [features] # Required for the doc tests diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 20f989c4882e35fe06d5496f851b4adec2c6f1c0..cd8da8ba2374e142835967c8f304c62b1d0bf250 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -532,6 +532,7 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api(call: &C) -> ApiRef; } +#[docify::export] /// Init the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). 
pub fn init_runtime_logger() { #[cfg(not(feature = "disable-logging"))] diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index b49f774161fd3f593872452a54e4f186a6743f6c..6c159fc63d0d3b86bbf59cbd43b9897d61c74d8a 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -15,25 +15,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = ".." } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -sp-version = { path = "../../version" } -sp-tracing = { path = "../../tracing" } -sp-runtime = { path = "../../runtime" } -sp-consensus = { path = "../../consensus/common" } -sc-block-builder = { path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sp-state-machine = { path = "../../state-machine" } -trybuild = "1.0.88" -rustversion = "1.0.6" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-api = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-version = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +trybuild = { workspace = true } +rustversion = { workspace = true } +scale-info = { features = ["derive"], workspace = true } [dev-dependencies] -criterion = "0.5.1" -futures = "0.3.30" +criterion = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -sp-core = { path = "../../core" } -static_assertions = "1.1.0" +sp-core = { workspace = true, default-features = true } +static_assertions = { workspace = true, default-features = true } [[bench]] name = "bench" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index cbb9f2133577b93234c052dba329ed1d5f453be7..fbf9def20fce7468ced3036c107cbbaac9e2593b 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../core", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-core = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -sp-std = { path = "../std", default-features = false } -sp-io = { path = "../io", default-features = false } +sp-std = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/application-crypto/test/Cargo.toml b/substrate/primitives/application-crypto/test/Cargo.toml index 0057606b38e57112e2988d96dbaa342059616ee0..43f9d3852005a87f06b451cc30177f504d0d1366 100644 --- a/substrate/primitives/application-crypto/test/Cargo.toml +++ b/substrate/primitives/application-crypto/test/Cargo.toml @@ 
-16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../../api" } -sp-application-crypto = { path = ".." } -sp-core = { path = "../../core", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-keystore = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index a9f2b80156f5ec121eea75abb389e02bbf99884e..a0d80fc9b504984121c990d9053d67383ad9c57b 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -17,23 +17,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -integer-sqrt = "0.1.2" -num-traits = { version = "0.2.17", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +integer-sqrt = { workspace = true } +num-traits = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -static_assertions = "1.1.0" -sp-std = { path = "../std", default-features = false } -docify = "0.2.8" +static_assertions = { workspace = true, default-features = true } +sp-std = { workspace = true } +docify = { workspace = true } [dev-dependencies] -criterion = "0.5.1" -primitive-types = "0.12.0" -sp-crypto-hashing = { path = "../crypto/hashing" } -rand = "0.8.5" +criterion = { workspace = true, default-features = true } +primitive-types = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/arithmetic/fuzzer/Cargo.toml b/substrate/primitives/arithmetic/fuzzer/Cargo.toml index ace30e9c90e91d6b4c784cf090329b91e8095fad..c978393af34c42876024dc2e429e7b7b846008a8 100644 --- a/substrate/primitives/arithmetic/fuzzer/Cargo.toml +++ b/substrate/primitives/arithmetic/fuzzer/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -arbitrary = "1.3.2" -fraction = "0.13.1" -honggfuzz = "0.5.49" -num-bigint = "0.4.3" -sp-arithmetic = { path = ".." 
} +arbitrary = { workspace = true } +fraction = { workspace = true } +honggfuzz = { workspace = true } +num-bigint = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } [[bin]] name = "biguint" diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 72a8bb7fc47d0f76a49de45a3828424eb7a0a240..26e08b8504c31d89b5ccbec33330a8877562f40d 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index cc4b10851544d601ef9caea512280d49b77cd443..ddc0c00a3be8a19e11016db81da1d22372350dad 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false } -sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 5e51a2d06ed7a8f1de6fda7b2b19bf49fa515e51..aedd720612c33d35722eca2f3815f562e15f23c0 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -17,14 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.30" +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -schnellru = "0.2.1" +parking_lot = { workspace = true, default-features = true } +schnellru = { workspace = true } thiserror = { workspace = true } -sp-api = { path = "../api" } -sp-consensus = { path = "../consensus/common" } -sp-database = { path = "../database" } -sp-runtime = { path = "../runtime" } -sp-state-machine = { path = "../state-machine" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 
933e41e2ab453bb3593ba94dce6397a097f9ed46..a928217d5885454b704c527354cabb5ba49af923 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -24,13 +24,12 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justifications, }; -use std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; - -use crate::header_metadata::HeaderMetadata; +use std::collections::{btree_set::BTreeSet, HashMap, VecDeque}; use crate::{ error::{Error, Result}, - tree_route, TreeRoute, + header_metadata::HeaderMetadata, + tree_route, CachedHeaderMetadata, }; /// Blockchain database header backend. Does not perform any validation. @@ -129,6 +128,32 @@ where { } +struct MinimalBlockMetadata<Block: BlockT> { + number: NumberFor<Block>, + hash: Block::Hash, + parent: Block::Hash, +} + +impl<Block> Clone for MinimalBlockMetadata<Block> +where + Block: BlockT, +{ + fn clone(&self) -> Self { + Self { number: self.number, hash: self.hash, parent: self.parent } + } +} + +impl<Block> Copy for MinimalBlockMetadata<Block> where Block: BlockT {} + +impl<Block> From<&CachedHeaderMetadata<Block>> for MinimalBlockMetadata<Block> +where + Block: BlockT, +{ + fn from(value: &CachedHeaderMetadata<Block>) -> Self { + Self { number: value.number, hash: value.hash, parent: value.parent } + } +} + /// Blockchain database backend. Does not perform any validation. pub trait Backend<Block: BlockT>: HeaderBackend<Block> + HeaderMetadata<Block, Error = Error> @@ -227,59 +252,128 @@ pub trait Backend<Block: BlockT>: finalized_block_hash: Block::Hash, finalized_block_number: NumberFor<Block>, ) -> std::result::Result<DisplacedLeavesAfterFinalization<Block>, Error> { - let mut result = DisplacedLeavesAfterFinalization::default(); + let leaves = self.leaves()?; - if finalized_block_number == Zero::zero() { - return Ok(result) + // If we have only one leaf there are no forks, and we can return early. + if finalized_block_number == Zero::zero() || leaves.len() == 1 { + return Ok(DisplacedLeavesAfterFinalization::default()) } - // For each leaf determine whether it belongs to a non-canonical branch. - for leaf_hash in self.leaves()?
{ - let leaf_block_header = self.expect_header(leaf_hash)?; - let leaf_number = *leaf_block_header.number(); + // Store hashes of finalized blocks for quick checking later, the last block is the + // finalized one + let mut finalized_chain = VecDeque::new(); + finalized_chain + .push_front(MinimalBlockMetadata::from(&self.header_metadata(finalized_block_hash)?)); + + // Local cache is a performance optimization in case of finalized block deep below the + // tip of the chain with a lot of leaves above finalized block + let mut local_cache = HashMap::<Block::Hash, MinimalBlockMetadata<Block>>::new(); + + let mut result = DisplacedLeavesAfterFinalization { + displaced_leaves: Vec::with_capacity(leaves.len()), + displaced_blocks: Vec::with_capacity(leaves.len()), + }; + let mut displaced_blocks_candidates = Vec::new(); + + for leaf_hash in leaves { + let mut current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(leaf_hash)?); + let leaf_number = current_header_metadata.number; + + // Collect all block hashes until the height of the finalized block + displaced_blocks_candidates.clear(); + while current_header_metadata.number > finalized_block_number { + displaced_blocks_candidates.push(current_header_metadata.hash); + + let parent_hash = current_header_metadata.parent; + match local_cache.get(&parent_hash) { + Some(metadata_header) => { + current_header_metadata = *metadata_header; + }, + None => { + current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?); + // Cache locally in case more branches above finalized block reference + // the same block hash + local_cache.insert(parent_hash, current_header_metadata); + }, + } + } + + // If it points back to the finalized header then nothing left to do, this leaf will be + // checked again later + if current_header_metadata.hash == finalized_block_hash { + continue; + } - let leaf_tree_route = match tree_route(self, leaf_hash, finalized_block_hash) { - Ok(tree_route) => tree_route, - Err(Error::UnknownBlock(_)) => { - // Sometimes routes can't be calculated. E.g. after warp sync. + // Otherwise the whole leaf branch needs to be pruned, track it all the way to the + // point of branching from the finalized chain + result.displaced_leaves.push((leaf_number, leaf_hash)); + result.displaced_blocks.extend(displaced_blocks_candidates.drain(..)); + result.displaced_blocks.push(current_header_metadata.hash); + // Collect the rest of the displaced blocks of leaf branch + for distance_from_finalized in 1_u32.. { + // Find block at `distance_from_finalized` from finalized block + let (finalized_chain_block_number, finalized_chain_block_hash) = + match finalized_chain.iter().rev().nth(distance_from_finalized as usize) { + Some(header) => (header.number, header.hash), + None => { + let metadata = MinimalBlockMetadata::from(&self.header_metadata( + finalized_chain.front().expect("Not empty; qed").parent, + )?); + let result = (metadata.number, metadata.hash); + finalized_chain.push_front(metadata); + result + }, + }; + + if current_header_metadata.number <= finalized_chain_block_number { + // Skip more blocks until we get all blocks on finalized chain until the height + // of the parent block continue; - }, - Err(e) => Err(e)?, - }; - // Is it a stale fork?
- let needs_pruning = leaf_tree_route.common_block().hash != finalized_block_hash; + let parent_hash = current_header_metadata.parent; + if finalized_chain_block_hash == parent_hash { + // Reached finalized chain, nothing left to do + break; + } - if needs_pruning { - result.displaced_leaves.insert(leaf_hash, leaf_number); - result.tree_routes.insert(leaf_hash, leaf_tree_route); + // Store displaced block and look deeper for block on finalized chain + result.displaced_blocks.push(parent_hash); + current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?); } } - Ok(result) + // There could be duplicates shared by multiple branches, clean them up + result.displaced_blocks.sort_unstable(); + result.displaced_blocks.dedup(); + + return Ok(result); } } /// Result of [`Backend::displaced_leaves_after_finalizing`]. #[derive(Clone, Debug)] pub struct DisplacedLeavesAfterFinalization { - /// A collection of hashes and block numbers for displaced leaves. - pub displaced_leaves: BTreeMap>, + /// A list of hashes and block numbers of displaced leaves. + pub displaced_leaves: Vec<(NumberFor, Block::Hash)>, - /// A collection of tree routes from the leaves to finalized block. - pub tree_routes: BTreeMap>, + /// A list of hashes displaced blocks from all displaced leaves. + pub displaced_blocks: Vec, } impl Default for DisplacedLeavesAfterFinalization { fn default() -> Self { - Self { displaced_leaves: Default::default(), tree_routes: Default::default() } + Self { displaced_leaves: Vec::new(), displaced_blocks: Vec::new() } } } impl DisplacedLeavesAfterFinalization { /// Returns a collection of hashes for the displaced leaves. pub fn hashes(&self) -> impl Iterator + '_ { - self.displaced_leaves.keys().cloned() + self.displaced_leaves.iter().map(|(_, hash)| *hash) } } diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 27caaae71add1a440b8a772db131329c951befbc..30024765add3ced4b5295a023cb0980aae3d59fd 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -20,12 +20,16 @@ use parking_lot::RwLock; use schnellru::{ByLength, LruMap}; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; +use sp_core::U256; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, One}, + Saturating, +}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. -const LRU_CACHE_SIZE: u32 = 5_000; +pub(crate) const LRU_CACHE_SIZE: u32 = 5_000; -/// Get lowest common ancestor between two blocks in the tree. +/// Get the lowest common ancestor between two blocks in the tree. 
/// /// This implementation is efficient because our trees have very few and /// small branches, and because of our current query pattern: @@ -105,15 +109,16 @@ pub fn tree_route + ?Sized>( let mut from = backend.header_metadata(from)?; let mut to = backend.header_metadata(to)?; - let mut from_branch = Vec::new(); - let mut to_branch = Vec::new(); - + let mut to_branch = + Vec::with_capacity(Into::::into(to.number.saturating_sub(from.number)).as_usize()); while to.number > from.number { to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; } + let mut from_branch = + Vec::with_capacity(Into::::into(to.number.saturating_sub(from.number)).as_usize()); while from.number > to.number { from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; @@ -132,6 +137,7 @@ pub fn tree_route + ?Sized>( // add the pivot block. and append the reversed to-branch // (note that it's reverse order originals) let pivot = from_branch.len(); + from_branch.reserve_exact(to_branch.len() + 1); from_branch.push(HashAndNumber { number: to.number, hash: to.hash }); from_branch.extend(to_branch.into_iter().rev()); @@ -149,7 +155,7 @@ pub struct HashAndNumber { /// A tree-route from one block to another in the chain. /// -/// All blocks prior to the pivot in the deque is the reverse-order unique ancestry +/// All blocks prior to the pivot in the vector is the reverse-order unique ancestry /// of the first block, the block at the pivot index is the common ancestor, /// and all blocks after the pivot is the ancestry of the second block, in /// order. diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index a54499178171dab260e953dd1082d512d48f6df0..3ae45062cb6814405d4398766fd636fc3c1b68ba 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,15 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-inherents = { path = "../../inherents", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", default-features = false } +async-trait = { optional = true, workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-timestamp = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 46c032ba61a6067bd7cfa9f786706701c7e429c3..884fc6c25c97da6b04be5046d7b693b09be49c13 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = 
"0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-inherents = { path = "../../inherents", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", optional = true, default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-timestamp = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index a682939a02f95064f8fed2fe7f6be0a9ad228ede..49d907506049aa4ac4067e3e05cbcd5542495b53 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -15,23 +15,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-crypto-hashing = { path = "../../crypto/hashing", default-features = false } -sp-io = { path = "../../io", default-features = false } -sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false } -strum = { version = "0.26.2", features = ["derive"], default-features = false } -lazy_static = { version = "1.4.0", optional = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-keystore = { workspace = true } +strum = { features = ["derive"], workspace = true } +lazy_static = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -w3f-bls = { version = "0.1.3", features = ["std"] } +array-bytes = { workspace = true, default-features = true } +w3f-bls = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 
913184402aef7bf9d1ee906faea935262e177a93..7f6f733d0e39a3a1181113d63178f3247d447d38 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -53,7 +53,7 @@ use scale_info::TypeInfo; use sp_application_crypto::{AppPublic, RuntimeAppPublic}; use sp_core::H256; use sp_runtime::{ - traits::{Hash, Keccak256, NumberFor}, + traits::{Hash, Header as HeaderT, Keccak256, NumberFor}, OpaqueValue, }; @@ -307,8 +307,10 @@ pub struct VoteMessage { pub signature: Signature, } -/// Proof of voter misbehavior on a given set id. Misbehavior/equivocation in -/// BEEFY happens when a voter votes on the same round/block for different payloads. +/// Proof showing that an authority voted twice in the same round. +/// +/// One type of misbehavior in BEEFY happens when an authority votes in the same round/block +/// for different payloads. /// Proving is achieved by collecting the signed commitments of conflicting votes. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct DoubleVotingProof { @@ -333,6 +335,27 @@ impl DoubleVotingProof { } } +/// Proof showing that an authority voted for a non-canonical chain. +/// +/// Proving is achieved by providing a proof that contains relevant info about the canonical chain +/// at `commitment.block_number`. The `commitment` can be checked against this info. +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct ForkVotingProof { + /// The equivocated vote. + pub vote: VoteMessage, + /// Proof containing info about the canonical chain at `commitment.block_number`. + pub ancestry_proof: AncestryProof, + /// The header of the block where the ancestry proof was generated + pub header: Header, +} + +/// Proof showing that an authority voted for a future block. +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct FutureBlockVotingProof { + /// The equivocated vote. + pub vote: VoteMessage, +} + /// Check a commitment signature by encoding the commitment and /// verifying the provided signature using the expected authority id. pub fn check_commitment_signature( @@ -351,7 +374,7 @@ where /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. -pub fn check_equivocation_proof( +pub fn check_double_voting_proof( report: &DoubleVotingProof::Signature>, ) -> bool where @@ -398,6 +421,25 @@ impl OnNewValidatorSet for () { fn on_new_validator_set(_: &ValidatorSet, _: &ValidatorSet) {} } +/// Hook containing helper methods for proving/checking commitment canonicity. +pub trait AncestryHelper { + /// Type containing proved info about the canonical chain at a certain height. + type Proof: Clone + Debug + Decode + Encode + PartialEq + TypeInfo; + /// The data needed for validating the proof. + type ValidationContext; + + /// Extract the validation context from the provided header. + fn extract_validation_context(header: Header) -> Option; + + /// Check if a commitment is pointing to a header on a non-canonical chain + /// against a canonicity proof generated at the same header height. + fn is_non_canonical( + commitment: &Commitment, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool; +} + /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. 
At @@ -408,7 +450,7 @@ pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. - #[api_version(3)] + #[api_version(4)] pub trait BeefyApi where AuthorityId : Codec + RuntimeAppPublic, { @@ -418,15 +460,15 @@ sp_api::decl_runtime_apis! { /// Return the current active BEEFY validator set fn validator_set() -> Option>; - /// Submits an unsigned extrinsic to report an equivocation. The caller - /// must provide the equivocation proof and a key ownership proof + /// Submits an unsigned extrinsic to report a double voting equivocation. The caller + /// must provide the double voting proof and a key ownership proof /// (should be obtained using `generate_key_ownership_proof`). The /// extrinsic will be unsigned and should only be accepted for local /// authorship (not to be broadcast to the network). This method returns /// `None` when creation of the extrinsic fails, e.g. if equivocation /// reporting is disabled for the given runtime (i.e. this method is /// hardcoded to return `None`). Only useful in an offchain context. - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: DoubleVotingProof, AuthorityId, ::Signature>, key_owner_proof: OpaqueKeyOwnershipProof, diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index 1a06e620e7ad400ed10c5451d453f403da1f3688..d22255c384bc2f2a87c607e208ef10157ac1b405 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -58,7 +58,7 @@ impl Payload { /// Returns a decoded payload value under given `id`. /// - /// In case the value is not there or it cannot be decoded does not match `None` is returned. + /// In case the value is not there, or it cannot be decoded `None` is returned. pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) } diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs index d7fd49214f12fe5e3f3b4174f23bb2a57e295deb..bd335ede489380541fe1b2c166d58f59e2a88614 100644 --- a/substrate/primitives/consensus/beefy/src/test_utils.rs +++ b/substrate/primitives/consensus/beefy/src/test_utils.rs @@ -18,12 +18,12 @@ #[cfg(feature = "bls-experimental")] use crate::ecdsa_bls_crypto; use crate::{ - ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, Payload, - ValidatorSetId, VoteMessage, + ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, + ForkVotingProof, FutureBlockVotingProof, Payload, ValidatorSetId, VoteMessage, }; use sp_application_crypto::{AppCrypto, AppPair, RuntimeAppPublic, Wraps}; use sp_core::{ecdsa, Pair}; -use sp_runtime::traits::Hash; +use sp_runtime::traits::{BlockNumber, Hash, Header as HeaderT}; use codec::Encode; use std::{collections::HashMap, marker::PhantomData}; @@ -136,20 +136,42 @@ impl From> for ecdsa_crypto::Public { } } -/// Create a new `EquivocationProof` based on given arguments. 
-pub fn generate_equivocation_proof( +/// Create a new `VoteMessage` from commitment primitives and keyring +pub fn signed_vote( + block_number: Number, + payload: Payload, + validator_set_id: ValidatorSetId, + keyring: &Keyring, +) -> VoteMessage { + let commitment = Commitment { validator_set_id, block_number, payload }; + let signature = keyring.sign(&commitment.encode()); + VoteMessage { commitment, id: keyring.public(), signature } +} + +/// Create a new `DoubleVotingProof` based on given arguments. +pub fn generate_double_voting_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring), vote2: (u64, Payload, ValidatorSetId, &Keyring), ) -> DoubleVotingProof { - let signed_vote = |block_number: u64, - payload: Payload, - validator_set_id: ValidatorSetId, - keyring: &Keyring| { - let commitment = Commitment { validator_set_id, block_number, payload }; - let signature = keyring.sign(&commitment.encode()); - VoteMessage { commitment, id: keyring.public(), signature } - }; let first = signed_vote(vote1.0, vote1.1, vote1.2, vote1.3); let second = signed_vote(vote2.0, vote2.1, vote2.2, vote2.3); DoubleVotingProof { first, second } } + +/// Create a new `ForkVotingProof` based on vote & canonical header. +pub fn generate_fork_voting_proof, AncestryProof>( + vote: (u64, Payload, ValidatorSetId, &Keyring), + ancestry_proof: AncestryProof, + header: Header, +) -> ForkVotingProof { + let signed_vote = signed_vote(vote.0, vote.1, vote.2, vote.3); + ForkVotingProof { vote: signed_vote, ancestry_proof, header } +} + +/// Create a new `ForkVotingProof` based on vote & canonical header. +pub fn generate_future_block_voting_proof( + vote: (u64, Payload, ValidatorSetId, &Keyring), +) -> FutureBlockVotingProof { + let signed_vote = signed_vote(vote.0, vote.1, vote.2, vote.3); + FutureBlockVotingProof { vote: signed_vote } +} diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 90aeadd5055e6e17107b70070ca30e17dd6b1c7c..a5d9a8da1a9bef6ac1faed5c61c6e12720570f59 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,18 +17,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-core = { path = "../../core" } -sp-inherents = { path = "../../inherents" } -sp-runtime = { path = "../../runtime" } -sp-state-machine = { path = "../../state-machine" } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.30" -sp-test-primitives = { path = "../../test-primitives" } +futures = { workspace = true } +sp-test-primitives = { workspace = true } [features] default = [] diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 9a59575a22c3908c53665522233ac1d7e688a0ed..6ba5bb40595a0d626e34f6a20189d9572f43af25 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = 
{ package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -finality-grandpa = { version = "0.16.2", default-features = false, features = ["derive-codec"] } +codec = { features = ["derive"], workspace = true } +finality-grandpa = { features = ["derive-codec"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false, optional = true } -sp-runtime = { path = "../../runtime", default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 0700e2c4f8b9f1595ca820ab0cc3fe5e963dec91..d4563cf8e6afdbb05556c17ab1235b0fa7caadba 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 792755730839226e2d200cdabccaa6f8bed0dda5..12bcbc1b33928dce6c08091cef8e9e0fff862d89 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,14 +18,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-core = { path = "../../core", default-features = false, features = ["bandersnatch-experimental"] } -sp-runtime = { path = "../../runtime", default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { features = ["bandersnatch-experimental"], workspace = true } +sp-consensus-slots = { workspace = true } +sp-core = { features = ["bandersnatch-experimental"], workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git 
a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index dd519eab46475fb16f4768de2fab6138c5a059b4..9d881c3acd0045185584b05c06531345fcb911e5 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-timestamp = { path = "../../timestamp", default-features = false } +sp-timestamp = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index f931faf8bd043406d2d30943a594d94ad745ae05..2ba4f959a5f9e7a14f9fdd2adf04b05d4b6b16b9 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,60 +16,60 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -bounded-collections = { version = "0.2.0", default-features = false } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } -impl-serde = { version = "0.4.0", default-features = false, optional = true } -hash-db = { version = "0.16.0", default-features = false } -hash256-std-hasher = { version = "0.15.2", default-features = false } -bs58 = { version = "0.5.0", default-features = false, optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } -substrate-bip39 = { path = "../../utils/substrate-bip39", default-features = false } +bounded-collections = { workspace = true } +primitive-types = { features = ["codec", "scale-info"], workspace = true } +impl-serde = { optional = true, workspace = true } +hash-db = { workspace = true } +hash256-std-hasher = { workspace = true } +bs58 = { optional = true, workspace = true } +rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } +substrate-bip39 = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = ["alloc"] } -zeroize = { version = "1.4.3", default-features = false } -secrecy = { version = "0.8.0", default-features = false, features = ["alloc"] } -parking_lot = { version = "0.12.1", optional = true } -ss58-registry = { version = "1.34.0", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-debug-derive = { path = "../debug-derive", default-features = false } -sp-storage = { path = "../storage", default-features = false } -sp-externalities = { path = "../externalities", optional = true, default-features = 
false } -futures = { version = "0.3.30", optional = true } -dyn-clonable = { version = "0.9.0", optional = true } +zeroize = { workspace = true } +secrecy = { features = ["alloc"], workspace = true } +parking_lot = { optional = true, workspace = true, default-features = true } +ss58-registry = { workspace = true } +sp-std = { workspace = true } +sp-debug-derive = { workspace = true } +sp-storage = { workspace = true } +sp-externalities = { optional = true, workspace = true } +futures = { optional = true, workspace = true } +dyn-clonable = { optional = true, workspace = true } thiserror = { optional = true, workspace = true } -tracing = { version = "0.1.29", optional = true } -bitflags = "1.3" -paste = "1.0.7" -itertools = { version = "0.11", optional = true } +tracing = { optional = true, workspace = true, default-features = true } +bitflags = { workspace = true } +paste = { workspace = true, default-features = true } +itertools = { optional = true, workspace = true } # full crypto -array-bytes = { version = "6.2.2" } -ed25519-zebra = { version = "4.0.3", default-features = false } -blake2 = { version = "0.10.4", default-features = false, optional = true } -libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"] } -schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false } -merlin = { version = "3.0", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } +array-bytes = { workspace = true, default-features = true } +ed25519-zebra = { workspace = true } +blake2 = { optional = true, workspace = true } +libsecp256k1 = { features = ["static-context"], workspace = true } +schnorrkel = { features = ["preaudit_deprecated"], workspace = true } +merlin = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-runtime-interface = { workspace = true } # k256 crate, better portability, intended to be used in substrate-runtimes (no-std) -k256 = { version = "0.13.3", features = ["alloc", "ecdsa"], default-features = false } +k256 = { features = ["alloc", "ecdsa"], workspace = true } # secp256k1 crate, better performance, intended to be used on host side (std) -secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } +secp256k1 = { features = ["alloc", "recovery"], optional = true, workspace = true } # bls crypto -w3f-bls = { version = "0.1.3", default-features = false, optional = true } +w3f-bls = { optional = true, workspace = true } # bandersnatch crypto bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "e9782f9", default-features = false, features = ["substrate-curves"], optional = true } [dev-dependencies] -criterion = "0.5.1" +criterion = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -lazy_static = "1.4.0" -regex = "1.6.0" +lazy_static = { workspace = true } +regex = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index 463eaea8ea30d827c6f39b7cc02e8947a085c88b..46dfe8d483b7468b06d3dccb5494e6c6ec8cefe0 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -11,11 +11,11 @@ workspace = true cargo-fuzz = true [dependencies] -lazy_static = "1.4.0" -libfuzzer-sys = "0.4" -regex = "1.10.2" +lazy_static = { 
workspace = true } +libfuzzer-sys = { workspace = true } +regex = { workspace = true } -sp-core = { path = ".." } +sp-core = { workspace = true, default-features = true } [[bin]] name = "fuzz_address_uri" diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 098bd135bfebba0d83bb2f58e72b4779bdf441b7..78ec92e4be98db3ac12aaba165fdcdfa596375e3 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -430,16 +430,7 @@ pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB macro_rules! generate_feature_enabled_macro { ( $macro_name:ident, $feature_name:meta, $d:tt ) => { $crate::paste::paste!{ - /// Enable/disable the given code depending on - #[doc = concat!("`", stringify!($feature_name), "`")] - /// being enabled for the crate or not. - /// - /// # Example /// - /// ```nocompile - /// // Will add the code depending on the feature being enabled or not. - #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] - /// ``` #[cfg($feature_name)] #[macro_export] macro_rules! [<_ $macro_name>] { @@ -448,6 +439,13 @@ macro_rules! generate_feature_enabled_macro { } } + /// + #[cfg(not($feature_name))] + #[macro_export] + macro_rules! [<_ $macro_name>] { + ( $d ( $d input:tt )* ) => {}; + } + /// Enable/disable the given code depending on #[doc = concat!("`", stringify!($feature_name), "`")] /// being enabled for the crate or not. @@ -458,15 +456,8 @@ macro_rules! generate_feature_enabled_macro { /// // Will add the code depending on the feature being enabled or not. #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] /// ``` - #[cfg(not($feature_name))] - #[macro_export] - macro_rules! [<_ $macro_name>] { - ( $d ( $d input:tt )* ) => {}; - } - - // Work around for: - #[doc(hidden)] - pub use [<_ $macro_name>] as $macro_name; + // https://github.com/rust-lang/rust/pull/52234 + pub use [<_ $macro_name>] as $macro_name; } }; } diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 142a5abf9b30d660077d39c12808a26d47bd41b6..1068787728babd906c0aaa0ff9d83c559f8d4f6d 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -15,19 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-ec = { version = "0.4.2", default-features = false, optional = true } -ark-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } -ark-bls12-377 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } -ark-bls12-381-ext = { version = "0.4.1", default-features = false, optional = true } -ark-bls12-381 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } -ark-bw6-761-ext = { version = "0.4.1", default-features = false, optional = true } -ark-bw6-761 = { version = "0.4.0", default-features = false, optional = true } -ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false, optional = true } -ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false, optional = true } -ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } -ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false, optional = true } -ark-scale = { version = "0.0.12", default-features = false, features = ["hazmat"], optional = true } -sp-runtime-interface = { path = "../../runtime-interface", 
default-features = false, optional = true } +ark-ec = { optional = true, workspace = true } +ark-bls12-377-ext = { optional = true, workspace = true } +ark-bls12-377 = { features = ["curve"], optional = true, workspace = true } +ark-bls12-381-ext = { optional = true, workspace = true } +ark-bls12-381 = { features = ["curve"], optional = true, workspace = true } +ark-bw6-761-ext = { optional = true, workspace = true } +ark-bw6-761 = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch-ext = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } +ark-ed-on-bls12-377-ext = { optional = true, workspace = true } +ark-ed-on-bls12-377 = { optional = true, workspace = true } +ark-scale = { features = ["hazmat"], optional = true, workspace = true } +sp-runtime-interface = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/crypto/hashing/Cargo.toml b/substrate/primitives/crypto/hashing/Cargo.toml index 1755164888bc9f846dd2ff587aaef5124dd8923d..461af269bf2dae83538ca07d4cf8190fca4e679a 100644 --- a/substrate/primitives/crypto/hashing/Cargo.toml +++ b/substrate/primitives/crypto/hashing/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2b_simd = { version = "1.0.1", default-features = false } -byteorder = { version = "1.3.2", default-features = false } -digest = { version = "0.10.3", default-features = false } -sha2 = { version = "0.10.7", default-features = false } -sha3 = { version = "0.10.0", default-features = false } -twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } +blake2b_simd = { workspace = true } +byteorder = { workspace = true } +digest = { workspace = true } +sha2 = { workspace = true } +sha3 = { workspace = true } +twox-hash = { features = ["digest_0_10"], workspace = true } [dev-dependencies] -criterion = "0.5.1" -sp-crypto-hashing-proc-macro = { path = "proc-macro" } +criterion = { workspace = true, default-features = true } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } [[bench]] name = "bench" diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index f988042d3075933bda6ae62b48291be8ebc80936..68e865c7dac58613146ca1d9e6dc398346c7455b 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -21,4 +21,4 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { features = ["full", "parsing"], workspace = true } -sp-crypto-hashing = { path = "..", default-features = false } +sp-crypto-hashing = { workspace = true } diff --git a/substrate/primitives/database/Cargo.toml b/substrate/primitives/database/Cargo.toml index 081aad6075840c109d397d5830e3e654c3325172..c0867198e8b9be3eeb0827ec9cc556e6f4082940 100644 --- a/substrate/primitives/database/Cargo.toml +++ b/substrate/primitives/database/Cargo.toml @@ -14,5 +14,5 @@ readme = "README.md" workspace = true [dependencies] -kvdb = "0.13.0" -parking_lot = "0.12.1" +kvdb = { workspace = true } +parking_lot = { workspace = true, default-features = true } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index debf964aa3dfdf7cebd23e0f1d24e74b0d880ec0..4f45d6525c4a9a153ad763847a4f79548127e8cc 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ 
b/substrate/primitives/debug-derive/Cargo.toml @@ -21,7 +21,7 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { workspace = true } -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 3a0d0315e9178a0e54ba001064fb6feb65c1b731..ca2f57c03b948061a15b73f5d7271885e5bc8a3c 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -environmental = { version = "1.1.3", default-features = false } -sp-storage = { path = "../storage", default-features = false } +codec = { workspace = true } +environmental = { workspace = true } +sp-storage = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index 142200f614a69d8a5e195db8d434fba4bb5a83b7..bcc46ee4f1b2993fd34a5d632d2ce4a2757872e4 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -83,24 +83,24 @@ pub trait Externalities: ExtensionStore { fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>); /// Read runtime storage. - fn storage(&self, key: &[u8]) -> Option>; + fn storage(&mut self, key: &[u8]) -> Option>; /// Get storage value hash. /// /// This may be optimized for large values. - fn storage_hash(&self, key: &[u8]) -> Option>; + fn storage_hash(&mut self, key: &[u8]) -> Option>; /// Get child storage value hash. /// /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Read child runtime storage. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec, value: Vec) { @@ -124,20 +124,20 @@ pub trait Externalities: ExtensionStore { } /// Whether a storage entry exists. - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { self.storage(key).is_some() } /// Whether a child storage entry exists. - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. - fn next_storage_key(&self, key: &[u8]) -> Option>; + fn next_storage_key(&mut self, key: &[u8]) -> Option>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Clear an entire child storage. 
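The switch from `&self` to `&mut self` on the `Externalities` read methods above lines up with the lazy append handling later in this patch: a read may now have to materialize a pending append and cache the result before returning it. A toy illustration of why a read that rewrites a cached entry in place needs a mutable receiver; the types here are made up and unrelated to the real trait:

```rust
use std::collections::HashMap;

/// Toy overlay entry: either a plain value or raw appended chunks that have not
/// been flattened into a single buffer yet.
enum Entry {
    Set(Vec<u8>),
    PendingAppend(Vec<Vec<u8>>),
}

struct Overlay {
    entries: HashMap<Vec<u8>, Entry>,
}

impl Overlay {
    /// Reading may rewrite the entry in place (materialize the pending append),
    /// which is why it takes `&mut self` rather than `&self`.
    fn storage(&mut self, key: &[u8]) -> Option<&[u8]> {
        if let Some(entry) = self.entries.get_mut(key) {
            if let Entry::PendingAppend(chunks) = entry {
                // Flatten once and cache the result for later reads.
                let flat: Vec<u8> = chunks.concat();
                *entry = Entry::Set(flat);
            }
        }
        match self.entries.get(key) {
            Some(Entry::Set(v)) => Some(v.as_slice()),
            _ => None,
        }
    }
}

fn main() {
    let mut overlay = Overlay { entries: HashMap::new() };
    overlay
        .entries
        .insert(b"k".to_vec(), Entry::PendingAppend(vec![vec![1], vec![2, 3]]));
    assert_eq!(overlay.storage(b"k"), Some(&[1u8, 2, 3][..]));
}
```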
/// diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 4fc8a0416fbe5f2ffdbd021dd42a3d072c61b241..d4345d17e7e5c20829448fed3e7c5bd1579bb928 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { features = ["bytes"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-api = { path = "../api", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } [features] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index c63aca801a0d7e513963b06d2ce4375f17194d3d..1495287698d66ba712b853b6e5188957420b2cbd 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -17,15 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } thiserror = { optional = true, workspace = true } -sp-runtime = { path = "../runtime", default-features = false, optional = true } +sp-runtime = { optional = true, workspace = true } [dev-dependencies] -futures = "0.3.30" +futures = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index abb16d163da060ae1d152276189157b77fb6970d..6b58ea70b8e5ef3a31fc1b9b468e43b544d21343 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -18,31 +18,38 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } -sp-core = { path = "../core", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-keystore = { path = "../keystore", default-features = false, optional = true } -sp-std = { path = "../std", default-features = false } -libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-trie = { path = "../trie", default-features = false, optional = true } -sp-externalities = { path = "../externalities", default-features = false } -sp-tracing = { path = "../tracing", default-features = false } +bytes = { workspace = true } +codec = { features = [ + "bytes", +], workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = 
true } +sp-keystore = { optional = true, workspace = true } +sp-std = { workspace = true } +libsecp256k1 = { optional = true, workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +sp-runtime-interface = { workspace = true } +sp-trie = { optional = true, workspace = true } +sp-externalities = { workspace = true } +sp-tracing = { workspace = true } log = { optional = true, workspace = true, default-features = true } -secp256k1 = { version = "0.28.0", features = ["global-context", "recovery"], optional = true } -tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.32", default-features = false } +secp256k1 = { features = [ + "global-context", + "recovery", +], optional = true, workspace = true, default-features = true } +tracing = { workspace = true } +tracing-core = { workspace = true } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. -ed25519-dalek = { version = "2.1", default-features = false, optional = true } +ed25519-dalek = { optional = true, workspace = true } + +docify = { workspace = true } [target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] polkavm-derive = { workspace = true } [build-dependencies] -rustversion = "1.0.6" +rustversion = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index c8675a9a90bd2ee16b9deca27457fafa88f8fec0..67e822ba7e248ed2b07614b85dca4e1c72e8c3e3 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -181,7 +181,7 @@ impl From for KillStorageResult { #[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. - fn get(&self, key: &[u8]) -> Option { + fn get(&mut self, key: &[u8]) -> Option { self.storage(key).map(bytes::Bytes::from) } @@ -190,7 +190,7 @@ pub trait Storage { /// doesn't exist at all. /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { + fn read(&mut self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { self.storage(key).map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -211,7 +211,7 @@ pub trait Storage { } /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { + fn exists(&mut self, key: &[u8]) -> bool { self.exists_storage(key) } @@ -387,7 +387,7 @@ pub trait DefaultChildStorage { /// /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the /// parent trie. Result is `None` if the value for `key` in the child storage can not be found. - fn get(&self, storage_key: &[u8], key: &[u8]) -> Option> { + fn get(&mut self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.child_storage(&child_info, key).map(|s| s.to_vec()) } @@ -400,7 +400,7 @@ pub trait DefaultChildStorage { /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. fn read( - &self, + &mut self, storage_key: &[u8], key: &[u8], value_out: &mut [u8], @@ -478,7 +478,7 @@ pub trait DefaultChildStorage { /// Check a child storage key. 
/// /// Check whether the given `key` exists in default child defined at `storage_key`. - fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool { + fn exists(&mut self, storage_key: &[u8], key: &[u8]) -> bool { let child_info = ChildInfo::new_default(storage_key); self.exists_child_storage(&child_info, key) } @@ -1805,6 +1805,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities(data); +}); diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index 8b6f746eaba0af9438557dc7dbe118bfbc4cdf34..6201d60ababd27d1e212791a03e62b7915be2c98 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -59,16 +59,17 @@ impl BasicExternalities { } /// Consume self and returns inner storages - pub fn into_storages(self) -> Storage { + #[cfg(feature = "std")] + pub fn into_storages(mut self) -> Storage { Storage { top: self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) .collect(), children_default: self .overlay - .children() + .children_mut() .map(|(iter, i)| { ( i.storage_key().to_vec(), @@ -87,6 +88,7 @@ impl BasicExternalities { /// Execute the given closure `f` with the externalities set and initialized with `storage`. /// /// Returns the result of the closure and updates `storage` with all changes. + #[cfg(feature = "std")] pub fn execute_with_storage( storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, @@ -118,19 +120,37 @@ impl BasicExternalities { } } +#[cfg(test)] impl PartialEq for BasicExternalities { - fn eq(&self, other: &BasicExternalities) -> bool { - self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == - other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + fn eq(&self, other: &Self) -> bool { + self.overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() == + other + .overlay + .changes() + .map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>() && self.overlay .children() - .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .map(|(iter, i)| { + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) + }) .collect::>() == other .overlay .children() .map(|(iter, i)| { - (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + ( + i, + iter.map(|(k, v)| (k, v.value_ref().materialize())) + .collect::>(), + ) }) .collect::>() } @@ -159,27 +179,27 @@ impl From> for BasicExternalities { impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: 
&[u8]) -> Option { self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.overlay .child_iter_after(child_info.storage_key(), key) .find_map(|(k, v)| v.value().map(|_| k.to_vec())) @@ -243,15 +263,14 @@ impl Externalities for BasicExternalities { MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } - fn storage_append(&mut self, key: Vec, value: Vec) { - let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); - crate::ext::StorageAppend::new(current_value).append(value); + fn storage_append(&mut self, key: Vec, element: Vec) { + self.overlay.append_storage(key, element, Default::default); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { let mut top = self .overlay - .changes() + .changes_mut() .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) .collect::>(); // Single child trie implementation currently allows using the same child @@ -278,7 +297,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + if let Some((data, child_info)) = self.overlay.child_changes_mut(child_info.storage_key()) { let delta = data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::() diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 9aa32bc866cfab9e2db44959c93bc4fe0b11e120..7a79c4e8a1f1bbd51fbdb34f5b91d2427bdd2995 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -22,7 +22,7 @@ use crate::overlayed_changes::OverlayedExtensions; use crate::{ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, }; -use codec::{Encode, EncodeAppend}; +use codec::{Compact, CompactLen, Decode, Encode}; use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::hexdisplay::HexDisplay; @@ -31,8 +31,8 @@ use sp_core::storage::{ }; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; -use crate::{log_error, trace, warn}; -use alloc::{boxed::Box, vec, vec::Vec}; +use crate::{trace, warn}; +use alloc::{boxed::Box, vec::Vec}; use core::{ any::{Any, TypeId}, cmp::Ordering, @@ -139,7 +139,7 @@ where H::Out: Ord + 'static, B: 'a + Backend, { - pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { + pub fn storage_pairs(&mut self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; self.backend @@ -147,7 +147,7 @@ where .expect("never fails in tests; qed.") .map(|key_value| key_value.expect("never fails in tests; qed.")) .map(|(k, v)| (k, Some(v))) - .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) + .chain(self.overlay.changes_mut().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) @@ -165,7 +165,7 @@ where self.overlay.set_offchain_storage(key, value) } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { let _guard = guard(); let result = self .overlay @@ -191,7 +191,7 @@ where result } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) 
-> Option> { let _guard = guard(); let result = self .overlay @@ -209,7 +209,7 @@ where result.map(|r| r.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let _guard = guard(); let result = self .overlay @@ -231,7 +231,7 @@ where result } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { let _guard = guard(); let result = self .overlay @@ -253,7 +253,7 @@ where result.map(|r| r.encode()) } - fn exists_storage(&self, key: &[u8]) -> bool { + fn exists_storage(&mut self, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.storage(key) { Some(x) => x.is_some(), @@ -271,7 +271,7 @@ where result } - fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + fn exists_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { @@ -293,7 +293,7 @@ where result } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { let mut next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); let mut overlay_changes = self.overlay.iter_after(key).peekable(); @@ -331,7 +331,7 @@ where } } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { let mut next_backend_key = self .backend .next_child_storage_key(child_info, key) @@ -501,10 +501,9 @@ where let _guard = guard(); let backend = &mut self.backend; - let current_value = self.overlay.value_mut_or_insert_with(&key, || { + self.overlay.append_storage(key.clone(), value, || { backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() }); - StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { @@ -731,10 +730,27 @@ impl<'a> StorageAppend<'a> { Self(storage) } + /// Extract the length of the list like data structure. + pub fn extract_length(&self) -> Option { + Compact::::decode(&mut &self.0[..]).map(|c| c.0).ok() + } + + /// Replace the length in the encoded data. + /// + /// If `old_length` is `None`, the previous length will be assumed to be `0`. + pub fn replace_length(&mut self, old_length: Option, new_length: u32) { + let old_len_encoded_len = old_length.map(|l| Compact::::compact_len(&l)).unwrap_or(0); + let new_len_encoded = Compact::(new_length).encode(); + self.0.splice(0..old_len_encoded_len, new_len_encoded); + } + /// Append the given `value` to the storage item. /// - /// If appending fails, `[value]` is stored in the storage item. - pub fn append(&mut self, value: Vec) { + /// If appending fails, `[value]` is stored in the storage item and we return false. 
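`extract_length` and `replace_length` above lean on how SCALE encodes sequences: an encoded `Vec<T>` is a `Compact<u32>` element count followed by the concatenated encoded elements, so an append can push raw bytes (`append_raw`) and only the prefix has to be patched afterwards. A minimal sketch of that prefix bookkeeping, assuming the `parity-scale-codec` crate (`codec`) and a plain `Vec<u8>` buffer instead of the `StorageAppend` wrapper:

```rust
// Sketch only: mirrors the prefix arithmetic (compact_len of the old count,
// splice in the encoding of the new count), not the real StorageAppend type.
use codec::{Compact, CompactLen, Decode, Encode};

/// Replace the compact length prefix of `buf` (which currently claims `old_len`
/// elements, or has no prefix at all when `old_len` is `None`) with `new_len`.
fn replace_length(buf: &mut Vec<u8>, old_len: Option<u32>, new_len: u32) {
    let old_prefix_size = old_len.map(|l| Compact::<u32>::compact_len(&l)).unwrap_or(0);
    let new_prefix = Compact::<u32>(new_len).encode();
    buf.splice(0..old_prefix_size, new_prefix);
}

fn main() {
    // Start from an encoded `Vec<u32>` with two elements.
    let mut buf = vec![1u32, 2].encode();
    assert_eq!(Compact::<u32>::decode(&mut &buf[..]).unwrap().0, 2);

    // Append a third element as raw bytes, then patch the prefix.
    buf.extend(3u32.encode());
    replace_length(&mut buf, Some(2), 3);

    assert_eq!(<Vec<u32>>::decode(&mut &buf[..]).unwrap(), vec![1, 2, 3]);
}
```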
+ #[cfg(any(test, feature = "fuzzing"))] + pub fn append(&mut self, value: Vec) -> bool { + use codec::EncodeAppend; + let mut result = true; let value = vec![EncodeOpaqueValue(value)]; let item = core::mem::take(self.0); @@ -742,13 +758,20 @@ impl<'a> StorageAppend<'a> { *self.0 = match Vec::::append_or_new(item, &value) { Ok(item) => item, Err(_) => { - log_error!( + result = false; + crate::log_error!( target: "runtime", "Failed to append value, resetting storage item to `[value]`.", ); value.encode() }, }; + result + } + + /// Append to current buffer, do not touch the prefixed length. + pub fn append_raw(&mut self, mut value: Vec) { + self.0.append(&mut value) } } @@ -849,7 +872,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -865,7 +888,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -895,7 +918,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -928,7 +951,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -944,7 +967,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -975,7 +998,7 @@ mod tests { ) .into(); - let ext = TestExt::new(&mut overlay, &backend, None); + let mut ext = TestExt::new(&mut overlay, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( diff --git a/substrate/primitives/state-machine/src/fuzzing.rs b/substrate/primitives/state-machine/src/fuzzing.rs new file mode 100644 index 0000000000000000000000000000000000000000..e147e6e88003cf720d4f6062c392630602210388 --- /dev/null +++ b/substrate/primitives/state-machine/src/fuzzing.rs @@ -0,0 +1,319 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! State machine fuzzing implementation, behind `fuzzing` feature. 
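The module introduced below is a differential fuzzer: every generated operation is applied both to a naive reference model (`SimpleOverlay`) and to the real `OverlayedChanges` over a trie backend, and every read must agree. A stripped-down sketch of that pattern, with toy key-value stores standing in for the real machinery:

```rust
use std::collections::BTreeMap;

/// The interface both implementations expose to the driver.
trait KvStore {
    fn insert(&mut self, key: Vec<u8>, value: Vec<u8>);
    fn remove(&mut self, key: &[u8]);
    fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
}

/// Reference model: a plain map with `None` as a removal tombstone.
impl KvStore for BTreeMap<Vec<u8>, Option<Vec<u8>>> {
    fn insert(&mut self, key: Vec<u8>, value: Vec<u8>) {
        BTreeMap::insert(self, key, Some(value));
    }
    fn remove(&mut self, key: &[u8]) {
        BTreeMap::insert(self, key.to_vec(), None);
    }
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        BTreeMap::get(self, key).cloned().flatten()
    }
}

/// "Implementation under test": an append-only change log scanned backwards.
#[derive(Default)]
struct ChangeLog(Vec<(Vec<u8>, Option<Vec<u8>>)>);

impl KvStore for ChangeLog {
    fn insert(&mut self, key: Vec<u8>, value: Vec<u8>) {
        self.0.push((key, Some(value)));
    }
    fn remove(&mut self, key: &[u8]) {
        self.0.push((key.to_vec(), None));
    }
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.iter().rev().find(|(k, _)| k.as_slice() == key).and_then(|(_, v)| v.clone())
    }
}

/// Operations a fuzzer would generate (cf. `FuzzAppendItem`, much reduced).
enum Op {
    Insert(Vec<u8>, Vec<u8>),
    Remove(Vec<u8>),
    Read(Vec<u8>),
}

fn run(ops: &[Op]) {
    let mut reference: BTreeMap<Vec<u8>, Option<Vec<u8>>> = BTreeMap::new();
    let mut under_test = ChangeLog::default();
    for op in ops {
        match op {
            Op::Insert(k, v) => {
                KvStore::insert(&mut reference, k.clone(), v.clone());
                under_test.insert(k.clone(), v.clone());
            },
            Op::Remove(k) => {
                KvStore::remove(&mut reference, k);
                under_test.remove(k);
            },
            // The differential check: both sides must agree on every read.
            Op::Read(k) => assert_eq!(KvStore::get(&reference, k), under_test.get(k)),
        }
    }
}

fn main() {
    run(&[
        Op::Insert(b"k".to_vec(), b"a".to_vec()),
        Op::Read(b"k".to_vec()),
        Op::Remove(b"k".to_vec()),
        Op::Read(b"k".to_vec()),
    ]);
}
```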
+ +use super::{ext::Ext, *}; +use crate::ext::StorageAppend; +use arbitrary::Arbitrary; +#[cfg(test)] +use codec::Encode; +use hash_db::Hasher; +use sp_core::{storage::StateVersion, traits::Externalities}; +#[cfg(test)] +use sp_runtime::traits::BlakeTwo256; +use sp_trie::PrefixedMemoryDB; +use std::collections::BTreeMap; + +#[derive(Arbitrary, Debug, Clone)] +enum DataLength { + Zero = 0, + Small = 1, + Medium = 3, + Big = 300, // 2 byte scale encode length +} + +#[derive(Arbitrary, Debug, Clone)] +#[repr(u8)] +enum DataValue { + A = b'a', + B = b'b', + C = b'c', + D = b'd', // This can be read as a multiple byte compact length. + EasyBug = 20u8, // value compact len. +} + +/// Action to fuzz +#[derive(Arbitrary, Debug, Clone)] +enum FuzzAppendItem { + Append(DataValue, DataLength), + Insert(DataValue, DataLength), + StartTransaction, + RollbackTransaction, + CommitTransaction, + Read, + Remove, + // To go over 256 items easily (different compact size then). + Append50(DataValue, DataLength), +} + +/// Arbitrary payload for fuzzing append. +#[derive(Arbitrary, Debug, Clone)] +pub struct FuzzAppendPayload(Vec, Option<(DataValue, DataLength)>); + +struct SimpleOverlay { + data: Vec, Option>>>, +} + +impl Default for SimpleOverlay { + fn default() -> Self { + Self { data: vec![BTreeMap::new()] } + } +} + +impl SimpleOverlay { + fn insert(&mut self, key: Vec, value: Option>) { + self.data.last_mut().expect("always at least one item").insert(key, value); + } + + fn append( + &mut self, + key: Vec, + value: Vec, + backend: &mut TrieBackend, H>, + ) where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, + { + let current_value = self + .data + .last_mut() + .expect("always at least one item") + .entry(key.clone()) + .or_insert_with(|| { + Some(backend.storage(&key).expect("Ext not allowed to fail").unwrap_or_default()) + }); + if current_value.is_none() { + *current_value = Some(vec![]); + } + StorageAppend::new(current_value.as_mut().expect("init above")).append(value); + } + + fn get(&mut self, key: &[u8]) -> Option<&Vec> { + self.data + .last_mut() + .expect("always at least one item") + .get(key) + .and_then(|o| o.as_ref()) + } + + fn commit_transaction(&mut self) { + if let Some(to_commit) = self.data.pop() { + let dest = self.data.last_mut().expect("always at least one item"); + for (k, v) in to_commit.into_iter() { + dest.insert(k, v); + } + } + } + + fn rollback_transaction(&mut self) { + let _ = self.data.pop(); + } + + fn start_transaction(&mut self) { + let cloned = self.data.last().expect("always at least one item").clone(); + self.data.push(cloned); + } +} + +struct FuzzAppendState { + key: Vec, + + // reference simple implementation + reference: SimpleOverlay, + + // trie backend + backend: TrieBackend, H>, + // Standard Overlay + overlay: OverlayedChanges, + + // block dropping/commiting too many transaction + transaction_depth: usize, +} + +impl FuzzAppendState +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + fn process_item(&mut self, item: FuzzAppendItem) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + match item { + FuzzAppendItem::Append(value, length) => { + let value = vec![value as u8; length as usize]; + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value, &mut self.backend); + }, + FuzzAppendItem::Append50(value, length) => { + let value = vec![value as u8; length as usize]; + for _ in 0..50 { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, 
None); + ext.storage_append(self.key.clone(), value.clone()); + self.reference.append(self.key.clone(), value.clone(), &mut self.backend); + } + }, + FuzzAppendItem::Insert(value, length) => { + let value = vec![value as u8; length as usize]; + ext.set_storage(self.key.clone(), value.clone()); + self.reference.insert(self.key.clone(), Some(value)); + }, + FuzzAppendItem::Remove => { + ext.clear_storage(&self.key); + self.reference.insert(self.key.clone(), None); + }, + FuzzAppendItem::Read => { + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + }, + FuzzAppendItem::StartTransaction => { + self.transaction_depth += 1; + self.reference.start_transaction(); + ext.storage_start_transaction(); + }, + FuzzAppendItem::RollbackTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.rollback_transaction(); + ext.storage_rollback_transaction().unwrap(); + }, + FuzzAppendItem::CommitTransaction => { + if self.transaction_depth == 0 { + return + } + self.transaction_depth -= 1; + self.reference.commit_transaction(); + ext.storage_commit_transaction().unwrap(); + }, + } + } + + fn check_final_state(&mut self) { + let mut ext = Ext::new(&mut self.overlay, &mut self.backend, None); + let left = ext.storage(self.key.as_slice()); + let right = self.reference.get(self.key.as_slice()); + assert_eq!(left.as_ref(), right); + } +} + +#[test] +fn fuzz_scenarii() { + assert_eq!(codec::Compact(5u16).encode()[0], DataValue::EasyBug as u8); + let scenarii = vec![ + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append50(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::D, DataLength::Small), + FuzzAppendItem::Read, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::D, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::B, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::Remove, + ], + Some((DataValue::EasyBug, DataLength::Small)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Small), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Remove, + FuzzAppendItem::CommitTransaction, + FuzzAppendItem::RollbackTransaction, + ], + Some((DataValue::B, DataLength::Big)), + ), + ( + vec![ + FuzzAppendItem::Append(DataValue::A, DataLength::Big), + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Medium), + FuzzAppendItem::Remove, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::StartTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + ( + vec![ + FuzzAppendItem::StartTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::RollbackTransaction, + FuzzAppendItem::Append(DataValue::A, DataLength::Zero), + ], + None, + ), + (vec![FuzzAppendItem::StartTransaction], Some((DataValue::EasyBug, DataLength::Zero))), + ]; + + for (scenario, init) in 
scenarii.into_iter() { + fuzz_append::(FuzzAppendPayload(scenario, init)); + } +} + +/// Test append operation for a given fuzzing payload. +pub fn fuzz_append(payload: FuzzAppendPayload) +where + H: Hasher, + H::Out: codec::Decode + codec::Encode + 'static, +{ + let FuzzAppendPayload(to_fuzz, initial) = payload; + let key = b"k".to_vec(); + let mut reference = SimpleOverlay::default(); + let initial: BTreeMap<_, _> = initial + .into_iter() + .map(|(v, l)| (key.clone(), vec![v as u8; l as usize])) + .collect(); + for (k, v) in initial.iter() { + reference.data[0].insert(k.clone(), Some(v.clone())); + } + reference.start_transaction(); // level 0 is backend, keep it untouched. + let overlay = OverlayedChanges::default(); + + let mut state = FuzzAppendState:: { + key, + reference, + overlay, + backend: (initial, StateVersion::default()).into(), + transaction_depth: 0, + }; + for item in to_fuzz { + state.process_item(item); + } + state.check_final_state(); +} diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs b/substrate/primitives/state-machine/src/in_memory_backend.rs index 06fe6d4162a7f4c920c8f09c2e4e8b05a3c72c03..7ba7457a6bf18bfb17d3faa7c6d3aaea6cd36788 100644 --- a/substrate/primitives/state-machine/src/in_memory_backend.rs +++ b/substrate/primitives/state-machine/src/in_memory_backend.rs @@ -132,6 +132,7 @@ where } } +#[cfg(feature = "std")] impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 13087431d387b9e986519bbdcabfe9eb3c4fe5c5..289b08755f680605a28a16adabac3cb3698a83fc 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -27,6 +27,8 @@ pub mod backend; mod basic; mod error; mod ext; +#[cfg(feature = "fuzzing")] +pub mod fuzzing; #[cfg(feature = "std")] mod in_memory_backend; pub(crate) mod overlayed_changes; @@ -1273,7 +1275,7 @@ mod tests { assert_eq!( overlay - .changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1299,7 +1301,7 @@ mod tests { assert_eq!( overlay - .changes() + .changes_mut() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ @@ -1340,7 +1342,7 @@ mod tests { assert_eq!( overlay - .children() + .children_mut() .flat_map(|(iter, _child_info)| iter) .map(|(k, v)| (k.clone(), v.value())) .collect::>(), @@ -1440,11 +1442,78 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } + // Test that we can append twice to a key, then perform a remove operation. + // The test checks specifically that the append is merged with its parent transaction + // on commit. 
+ #[test] + fn commit_merges_append_with_parent() { + #[derive(codec::Encode, codec::Decode)] + enum Item { + Item1, + Item2, + } + + let key = b"events".to_vec(); + let state = new_in_mem::(); + let backend = state.as_trie_backend(); + let mut overlay = OverlayedChanges::default(); + + // Append first item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + ext.clear_storage(key.as_slice()); + ext.storage_append(key.clone(), Item::Item1.encode()); + } + + // Append second item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + + ext.storage_append(key.clone(), Item::Item2.encode()); + + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1, Item::Item2].encode()),); + } + + // Remove item + overlay.start_transaction(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + + ext.place_storage(key.clone(), None); + + assert_eq!(ext.storage(key.as_slice()), None); + } + + // Remove gets commited and merged into previous transaction + overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), None,); + } + + // Remove gets rolled back, we should see the initial append again. + overlay.rollback_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + + overlay.commit_transaction().unwrap(); + { + let mut ext = Ext::new(&mut overlay, backend, None); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::Item1].encode())); + } + } + #[test] fn remove_with_append_then_rollback_appended_then_append_again() { #[derive(codec::Encode, codec::Decode)] @@ -1499,7 +1568,7 @@ mod tests { // Then only initialization item and second (committed) item should persist. { - let ext = Ext::new(&mut overlay, backend, None); + let mut ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index 601bc2e29198561d501e1f20b11c2323c0064989..c478983e979af440a409199934c641eb01492675 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -21,11 +21,15 @@ use super::{Extrinsics, StorageKey, StorageValue}; #[cfg(not(feature = "std"))] use alloc::collections::btree_set::BTreeSet as Set; +use codec::{Compact, CompactLen}; #[cfg(feature = "std")] use std::collections::HashSet as Set; -use crate::warn; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use crate::{ext::StorageAppend, warn}; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use core::hash::Hash; use smallvec::SmallVec; @@ -86,10 +90,97 @@ impl Default for OverlayedEntry { } /// History of value, with removal support. -pub type OverlayedValue = OverlayedEntry>; +pub type OverlayedValue = OverlayedEntry; + +/// Content in an overlay for a given transactional depth. +#[derive(Debug, Clone, Default)] +#[cfg_attr(test, derive(PartialEq))] +pub enum StorageEntry { + /// The storage entry should be set to the stored value. + Set(StorageValue), + /// The storage entry should be removed. 
+ #[default]
+ Remove,
+ /// The storage entry was appended to.
+ ///
+ /// This assumes that the storage entry is encoded as a SCALE list. This means that it is
+ /// prefixed with a `Compact` that represents the length, followed by all the encoded
+ /// elements.
+ Append {
+ /// The value of the storage entry.
+ ///
+ /// This may or may not be prefixed by the length, depending on the materialized length.
+ data: StorageValue,
+ /// Current number of elements stored in data.
+ current_length: u32,
+ /// The number of elements as stored in the prefixed length in `data`.
+ ///
+ /// If `None`, then `data` is not yet prefixed with the length.
+ materialized_length: Option,
+ /// The size of `data` in the parent transactional layer.
+ ///
+ /// Only set when the parent layer is in `Append` state.
+ parent_size: Option,
+ },
+}
+
+impl StorageEntry {
+ /// Convert to an [`Option`].
+ pub(super) fn to_option(mut self) -> Option {
+ self.materialize_in_place();
+ match self {
+ StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data),
+ StorageEntry::Remove => None,
+ }
+ }
+
+ /// Return as an [`Option`].
+ fn as_option(&mut self) -> Option<&StorageValue> {
+ self.materialize_in_place();
+ match self {
+ StorageEntry::Append { data, .. } | StorageEntry::Set(data) => Some(data),
+ StorageEntry::Remove => None,
+ }
+ }
+
+ /// Materialize the internal state and cache the resulting materialized value.
+ fn materialize_in_place(&mut self) {
+ if let StorageEntry::Append { data, materialized_length, current_length, .. } = self {
+ let current_length = *current_length;
+ if materialized_length.map_or(false, |m| m == current_length) {
+ return
+ }
+ StorageAppend::new(data).replace_length(*materialized_length, current_length);
+ *materialized_length = Some(current_length);
+ }
+ }
+
+ /// Materialize the internal state.
+ #[cfg(test)]
+ pub(crate) fn materialize(&self) -> Option> {
+ use alloc::borrow::Cow;
+
+ match self {
+ StorageEntry::Append { data, materialized_length, current_length, .. } => {
+ let current_length = *current_length;
+ if materialized_length.map_or(false, |m| m == current_length) {
+ Some(Cow::Borrowed(data.as_ref()))
+ } else {
+ let mut data = data.clone();
+ StorageAppend::new(&mut data)
+ .replace_length(*materialized_length, current_length);
+
+ Some(data.into())
+ }
+ },
+ StorageEntry::Remove => None,
+ StorageEntry::Set(e) => Some(Cow::Borrowed(e.as_ref())),
+ }
+ }
+}
 /// Change set for basic key value with extrinsics index recording and removal support.
-pub type OverlayedChangeSet = OverlayedMap>;
+pub type OverlayedChangeSet = OverlayedMap;
 /// Holds a set of changes with the ability modify them using nested transactions.
 #[derive(Debug, Clone)]
@@ -120,7 +211,7 @@ impl Default for OverlayedMap {
 }
 #[cfg(feature = "std")]
-impl From for OverlayedMap> {
+impl From for OverlayedMap {
 fn from(storage: sp_core::storage::StorageMap) -> Self {
 Self {
 changes: storage
@@ -130,7 +221,7 @@ impl From for OverlayedMap OverlayedEntry {
 ///
 /// This makes sure that the old version is not overwritten and can be properly
 /// rolled back when required.
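// A minimal sketch (not from this patch) of the value layout that `StorageEntry::Append`
// assumes: a SCALE list is the compact-encoded element count followed by the concatenated,
// already-encoded elements. The `Compact<u32>` prefix grows from one byte (counts up to 63)
// to two bytes (counts up to 16383), which is why the prefix is only rewritten
// ("materialized") lazily and why parent sizes have to be adjusted on restore.
// The helper name `scale_list` is illustrative only.
use codec::{Compact, Encode};

fn scale_list(encoded_elements: &[Vec<u8>]) -> Vec<u8> {
    let mut out = Compact(encoded_elements.len() as u32).encode();
    for element in encoded_elements {
        out.extend_from_slice(element); // elements are assumed to be SCALE-encoded already
    }
    out
}
// e.g. `scale_list(&[1u8.encode(), 2u8.encode()])` equals `vec![1u8, 2u8].encode()`.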
- fn set(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) {
+ fn set_offchain(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) {
 if first_write_in_tx || self.transactions.is_empty() {
 self.transactions.push(InnerValue { value, extrinsics: Default::default() });
 } else {
@@ -202,10 +293,223 @@ impl OverlayedEntry {
 }
 }
-impl OverlayedEntry> {
+/// Restore the `current_data` from a [`StorageEntry::Append`] back to the parent.
+///
+/// When creating a new transaction layer from an appended entry, the `data` will be moved to
+/// prevent extra allocations. So, we need to move the `data` back to the parent layer when there is
+/// a rollback or the entry is set to some different value. This function puts the data back into
+/// the `parent` and truncates any extra elements that got added in the current layer.
+///
+/// Both the current and the `parent` layer need to be [`StorageEntry::Append`]; otherwise the
+/// function is a no-op.
+fn restore_append_to_parent(
+ parent: &mut StorageEntry,
+ mut current_data: Vec,
+ current_materialized: Option,
+ mut target_parent_size: usize,
+) {
+ match parent {
+ StorageEntry::Append {
+ data: parent_data,
+ materialized_length: parent_materialized,
+ ..
+ } => {
+ // Forward the materialized length to the parent with the data. Next time when
+ // materializing the value, the length will be corrected. This prevents doing a
+ // potential allocation here.
+
+ let prev = parent_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0);
+ let new = current_materialized.map(|l| Compact::::compact_len(&l)).unwrap_or(0);
+ let delta = new.abs_diff(prev);
+ if prev >= new {
+ target_parent_size -= delta;
+ } else {
+ target_parent_size += delta;
+ }
+ *parent_materialized = current_materialized;
+
+ // Truncate the data to remove any extra elements
+ current_data.truncate(target_parent_size);
+ *parent_data = current_data;
+ },
+ _ => {
+ // No value or a simple value, no need to restore
+ },
+ }
+}
+
+impl OverlayedEntry {
+ /// Writes a new version of a value.
+ ///
+ /// This makes sure that the old version is not overwritten and can be properly
+ /// rolled back when required.
+ fn set(
+ &mut self,
+ value: Option,
+ first_write_in_tx: bool,
+ at_extrinsic: Option,
+ ) {
+ let value = value.map_or_else(|| StorageEntry::Remove, StorageEntry::Set);
+
+ if first_write_in_tx || self.transactions.is_empty() {
+ self.transactions.push(InnerValue { value, extrinsics: Default::default() });
+ } else {
+ let mut old_value = self.value_mut();
+
+ let set_prev = if let StorageEntry::Append {
+ data,
+ current_length: _,
+ materialized_length,
+ parent_size,
+ } = &mut old_value
+ {
+ parent_size
+ .map(|parent_size| (core::mem::take(data), *materialized_length, parent_size))
+ } else {
+ None
+ };
+
+ *old_value = value;
+
+ if let Some((data, current_materialized, parent_size)) = set_prev {
+ let transactions = self.transactions.len();
+
+ debug_assert!(transactions >= 2);
+ let parent = self
+ .transactions
+ .get_mut(transactions - 2)
+ .expect("`set_prev` is only `Some(_)`, if the value came from parent; qed");
+ restore_append_to_parent(
+ &mut parent.value,
+ data,
+ current_materialized,
+ parent_size,
+ );
+ }
+ }
+
+ if let Some(extrinsic) = at_extrinsic {
+ self.transaction_extrinsics_mut().insert(extrinsic);
+ }
+ }
+
+ /// Append content to a value, updating a prefixed compact encoded length.
+ ///
+ /// This makes sure that the old version is not overwritten and can be properly
+ /// rolled back when required.
+ /// This avoids copying the value from the previous transaction.
+ fn append(
+ &mut self,
+ element: StorageValue,
+ first_write_in_tx: bool,
+ init: impl Fn() -> StorageValue,
+ at_extrinsic: Option,
+ ) {
+ if self.transactions.is_empty() {
+ let mut init_value = init();
+
+ let mut append = StorageAppend::new(&mut init_value);
+
+ // Either the init value is a SCALE-list-like value to which the `element` gets appended
+ // or the value is reset to `[element]`.
+ let (data, current_length, materialized_length) =
+ if let Some(len) = append.extract_length() {
+ append.append_raw(element);
+
+ (init_value, len + 1, Some(len))
+ } else {
+ (element, 1, None)
+ };
+
+ self.transactions.push(InnerValue {
+ value: StorageEntry::Append {
+ data,
+ current_length,
+ materialized_length,
+ parent_size: None,
+ },
+ extrinsics: Default::default(),
+ });
+ } else if first_write_in_tx {
+ let parent = self.value_mut();
+ let (data, current_length, materialized_length, parent_size) = match parent {
+ StorageEntry::Remove => (element, 1, None, None),
+ StorageEntry::Append { data, current_length, materialized_length, .. } => {
+ let parent_len = data.len();
+ let mut data_buf = core::mem::take(data);
+ StorageAppend::new(&mut data_buf).append_raw(element);
+ (data_buf, *current_length + 1, *materialized_length, Some(parent_len))
+ },
+ StorageEntry::Set(prev) => {
+ // For compatibility: append if there is an encoded length, overwrite
+ // with the value otherwise.
+ if let Some(current_length) = StorageAppend::new(prev).extract_length() {
+ // The `prev` is cloned here, but it could be optimized to not do the clone
+ // here as it is done for `Append` above.
+ let mut data = prev.clone();
+ StorageAppend::new(&mut data).append_raw(element);
+ (data, current_length + 1, Some(current_length), None)
+ } else {
+ // Overwrite, same as the empty case.
+ (element, 1, None, None)
+ }
+ },
+ };
+
+ self.transactions.push(InnerValue {
+ value: StorageEntry::Append {
+ data,
+ current_length,
+ materialized_length,
+ parent_size,
+ },
+ extrinsics: Default::default(),
+ });
+ } else {
+ // Not the first write in this transaction.
+ let old_value = self.value_mut();
+ let replace = match old_value {
+ StorageEntry::Remove => Some((element, 1, None)),
+ StorageEntry::Set(data) => {
+ // Note that when the data here was not initialized with append
+ // but still starts with a valid compact u32, we can end up with totally broken
+ // encoding.
+ let mut append = StorageAppend::new(data);
+
+ // For compatibility: append if there is an encoded length, overwrite
+ // with the value otherwise.
+ if let Some(current_length) = append.extract_length() {
+ append.append_raw(element);
+ Some((core::mem::take(data), current_length + 1, Some(current_length)))
+ } else {
+ Some((element, 1, None))
+ }
+ },
+ StorageEntry::Append { data, current_length, .. } => {
+ StorageAppend::new(data).append_raw(element);
+ *current_length += 1;
+ None
+ },
+ };
+
+ if let Some((data, current_length, materialized_length)) = replace {
+ *old_value = StorageEntry::Append {
+ data,
+ current_length,
+ materialized_length,
+ parent_size: None,
+ };
+ }
+ }
+
+ if let Some(extrinsic) = at_extrinsic {
+ self.transaction_extrinsics_mut().insert(extrinsic);
+ }
+ }
+
 /// The value as seen by the current transaction.
- pub fn value(&self) -> Option<&StorageValue> { - self.value_ref().as_ref() + pub fn value(&mut self) -> Option<&StorageValue> { + self.value_mut().as_option() } } @@ -238,20 +542,20 @@ impl OverlayedMap { } /// Get an optional reference to the value stored for the specified key. - pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> + pub fn get(&mut self, key: &Q) -> Option<&mut OverlayedEntry> where K: core::borrow::Borrow, Q: Ord + ?Sized, { - self.changes.get(key) + self.changes.get_mut(key) } /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set(&mut self, key: K, value: V, at_extrinsic: Option) { + pub fn set_offchain(&mut self, key: K, value: V, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); - overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + overlayed.set_offchain(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } /// Get a list of all changes as seen by current transaction. @@ -259,6 +563,11 @@ impl OverlayedMap { self.changes.iter() } + /// Get a list of all changes as seen by current transaction. + pub fn changes_mut(&mut self) -> impl Iterator)> { + self.changes.iter_mut() + } + /// Get a list of all changes as seen by current transaction, consumes /// the overlay. pub fn into_changes(self) -> impl Iterator)> { @@ -298,7 +607,7 @@ impl OverlayedMap { /// /// This rollbacks all dangling transaction left open by the runtime. /// Calling this while already outside the runtime will return an error. - pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + pub fn exit_runtime_offchain(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { return Err(NotInRuntime) } @@ -310,7 +619,7 @@ impl OverlayedMap { ); } while self.has_open_runtime_transactions() { - self.rollback_transaction() + self.rollback_transaction_offchain() .expect("The loop condition checks that the transaction depth is > 0; qed"); } Ok(()) @@ -331,24 +640,24 @@ impl OverlayedMap { /// /// Any changes made during that transaction are discarded. Returns an error if /// there is no open transaction that can be rolled back. - pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(true) + pub fn rollback_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(true) } /// Commit the last transaction started by `start_transaction`. /// /// Any changes made during that transaction are committed. Returns an error if /// there is no open transaction that can be committed. - pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { - self.close_transaction(false) + pub fn commit_transaction_offchain(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction_offchain(false) } - fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + fn close_transaction_offchain(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { // runtime is not allowed to close transactions started by the client - if let ExecutionMode::Runtime = self.execution_mode { - if !self.has_open_runtime_transactions() { - return Err(NoOpenTransaction) - } + if matches!(self.execution_mode, ExecutionMode::Runtime) && + !self.has_open_runtime_transactions() + { + return Err(NoOpenTransaction) } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? 
{
@@ -398,32 +707,176 @@ impl OverlayedMap {
 }
 
 impl OverlayedChangeSet {
- /// Get a mutable reference for a value.
+ /// Rollback the last transaction started by `start_transaction`.
+ ///
+ /// Any changes made during that transaction are discarded. Returns an error if
+ /// there is no open transaction that can be rolled back.
+ pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> {
+ self.close_transaction(true)
+ }
+
+ /// Commit the last transaction started by `start_transaction`.
+ ///
+ /// Any changes made during that transaction are committed. Returns an error if
+ /// there is no open transaction that can be committed.
+ pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> {
+ self.close_transaction(false)
+ }
+
+ fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> {
+ // runtime is not allowed to close transactions started by the client
+ if matches!(self.execution_mode, ExecutionMode::Runtime) &&
+ !self.has_open_runtime_transactions()
+ {
+ return Err(NoOpenTransaction)
+ }
+
+ for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? {
+ let overlayed = self.changes.get_mut(&key).expect(
+ "\
+ A write to an OverlayedValue is recorded in the dirty key set. Before an
+ OverlayedValue is removed, its containing dirty set is removed. This
+ function is only called for keys that are in the dirty set. qed\
+ ",
+ );
+
+ if rollback {
+ match overlayed.pop_transaction().value {
+ StorageEntry::Append {
+ data,
+ materialized_length,
+ parent_size: Some(parent_size),
+ ..
+ } => {
+ debug_assert!(!overlayed.transactions.is_empty());
+ restore_append_to_parent(
+ overlayed.value_mut(),
+ data,
+ materialized_length,
+ parent_size,
+ );
+ },
+ _ => (),
+ }
+
+ // We need to remove the key as an `OverlayValue` with no transactions
+ // violates its invariant of always having at least one transaction.
+ if overlayed.transactions.is_empty() {
+ self.changes.remove(&key);
+ }
+ } else {
+ let has_predecessor = if let Some(dirty_keys) = self.dirty_keys.last_mut() {
+ // Not the last tx: Did the previous tx write to this key?
+ !dirty_keys.insert(key)
+ } else {
+ // Last tx: Is there already a value in the committed set?
+ // Check against one rather than empty because the current tx is still
+ // in the list as it is popped later in this function.
+ overlayed.transactions.len() > 1
+ };
+
+ // We only need to merge if there is a pre-existing value. It may be a value from
+ // the previous transaction or a value committed without any open transaction.
+ if has_predecessor {
+ let mut committed_tx = overlayed.pop_transaction();
+ let mut merge_appends = false;
+
+ // Consecutive appends need to keep the past `parent_size` value.
+ if let StorageEntry::Append { parent_size, .. } = &mut committed_tx.value {
+ if parent_size.is_some() {
+ let parent = overlayed.value_mut();
+ if let StorageEntry::Append { parent_size: keep_me, .. } = parent {
+ merge_appends = true;
+ *parent_size = *keep_me;
+ }
+ }
+ }
+
+ if merge_appends {
+ *overlayed.value_mut() = committed_tx.value;
+ } else {
+ let removed = core::mem::replace(overlayed.value_mut(), committed_tx.value);
+ // The transaction being committed is not an append operation. However, the
+ // value being overwritten in the previous transaction might be an append
+ // that needs to be merged with its parent. We only need to handle `Append`
+ // here because `Set` and `Remove` can directly overwrite previous
+ // operations.
+ if let StorageEntry::Append { + parent_size, data, materialized_length, .. + } = removed + { + if let Some(parent_size) = parent_size { + let transactions = overlayed.transactions.len(); + + // info from replaced head so len is at least one + // and parent_size implies a parent transaction + // so length is at least two. + debug_assert!(transactions >= 2); + if let Some(parent) = + overlayed.transactions.get_mut(transactions - 2) + { + restore_append_to_parent( + &mut parent.value, + data, + materialized_length, + parent_size, + ) + } + } + } + } + + overlayed.transaction_extrinsics_mut().extend(committed_tx.extrinsics); + } + } + } + + Ok(()) + } + + /// Call this when control returns from the runtime. + /// + /// This commits all dangling transaction left open by the runtime. + /// Calling this while already outside the runtime will return an error. + pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + if matches!(self.execution_mode, ExecutionMode::Client) { + return Err(NotInRuntime) + } + + self.execution_mode = ExecutionMode::Client; + if self.has_open_runtime_transactions() { + warn!( + "{} storage transactions are left open by the runtime. Those will be rolled back.", + self.transaction_depth() - self.num_client_transactions, + ); + } + while self.has_open_runtime_transactions() { + self.rollback_transaction() + .expect("The loop condition checks that the transaction depth is > 0; qed"); + } + + Ok(()) + } + + /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn modify( + pub fn set(&mut self, key: StorageKey, value: Option, at_extrinsic: Option) { + let overlayed = self.changes.entry(key.clone()).or_default(); + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + } + + /// Append bytes to an existing content. + pub fn append_storage( &mut self, key: StorageKey, + value: StorageValue, init: impl Fn() -> StorageValue, at_extrinsic: Option, - ) -> &mut Option { + ) { let overlayed = self.changes.entry(key.clone()).or_default(); let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { - if first_write_in_tx { - Some(tx.value.clone()) - } else { - None - } - } else { - Some(Some(init())) - }; - - if let Some(cloned) = clone_into_new_tx { - overlayed.set(cloned, first_write_in_tx, at_extrinsic); - } - overlayed.value_mut() + overlayed.append(value, first_write_in_tx, init, at_extrinsic); } /// Set all values to deleted which are matched by the predicate. @@ -436,7 +889,7 @@ impl OverlayedChangeSet { ) -> u32 { let mut count = 0; for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - if val.value_ref().is_some() { + if matches!(val.value_ref(), StorageEntry::Set(..) | StorageEntry::Append { .. }) { count += 1; } val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); @@ -445,10 +898,13 @@ impl OverlayedChangeSet { } /// Get the iterator over all changes that follow the supplied `key`. 
- pub fn changes_after(&self, key: &[u8]) -> impl Iterator { + pub fn changes_after( + &mut self, + key: &[u8], + ) -> impl Iterator { use core::ops::Bound; let range = (Bound::Excluded(key), Bound::Unbounded); - self.changes.range::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) + self.changes.range_mut::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) } } @@ -460,18 +916,19 @@ mod test { type Changes<'a> = Vec<(&'a [u8], (Option<&'a [u8]>, Vec))>; type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; - fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { + fn assert_changes(is: &mut OverlayedChangeSet, expected: &Changes) { let is: Changes = is - .changes() + .changes_mut() .map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) + let extrinsics = v.extrinsics().into_iter().collect(); + (k.as_ref(), (v.value().map(AsRef::as_ref), extrinsics)) }) .collect(); assert_eq!(&is, expected); } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_committed().collect::>(); + let is = is.drain_committed().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -480,7 +937,7 @@ mod test { } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_committed().collect::>(); + let is = is.drain_committed().map(|(k, v)| (k, v.to_option())).collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.map(From::from))) @@ -535,7 +992,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -546,7 +1003,7 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // roll back our first transactions that actually contains something changeset.rollback_transaction().unwrap(); @@ -558,11 +1015,11 @@ mod test { (b"key42", (Some(b"val42"), vec![42])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -598,7 +1055,7 @@ mod test { (b"key7", (Some(b"val7-rolled"), vec![77])), (b"key99", (Some(b"val99"), vec![99])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); // this should be no-op changeset.start_transaction(); @@ -609,35 +1066,46 @@ mod test { assert_eq!(changeset.transaction_depth(), 3); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 2); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![(b"key0", (Some(b"val0-1"), vec![1, 10])), (b"key1", (Some(b"val1"), vec![1]))]; - assert_changes(&changeset, 
&rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } #[test] - fn modify_works() { + fn append_works() { + use codec::Encode; let mut changeset = OverlayedChangeSet::default(); assert_eq!(changeset.transaction_depth(), 0); - let init = || b"valinit".to_vec(); + let init = || vec![b"valinit".to_vec()].encode(); // committed set - changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + let val0 = vec![b"val0".to_vec()].encode(); + changeset.set(b"key0".to_vec(), Some(val0.clone()), Some(0)); changeset.set(b"key1".to_vec(), None, Some(1)); - let val = changeset.modify(b"key3".to_vec(), init, Some(3)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + let all_changes: Changes = + vec![(b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1]))]; + + assert_changes(&mut changeset, &all_changes); + changeset.append_storage(b"key3".to_vec(), b"-modified".to_vec().encode(), init, Some(3)); + let val3 = vec![b"valinit".to_vec(), b"-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0.as_slice()), vec![0])), + (b"key1", (None, vec![1])), + (b"key3", (Some(val3.as_slice()), vec![3])), + ]; + assert_changes(&mut changeset, &all_changes); changeset.start_transaction(); assert_eq!(changeset.transaction_depth(), 1); @@ -645,39 +1113,75 @@ mod test { assert_eq!(changeset.transaction_depth(), 2); // non existing value -> init value should be returned - let val = changeset.modify(b"key2".to_vec(), init, Some(2)); - assert_eq!(val, &Some(b"valinit".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + changeset.append_storage(b"key3".to_vec(), b"-twice".to_vec().encode(), init, Some(15)); - // existing value should be returned by modify - let val = changeset.modify(b"key0".to_vec(), init, Some(10)); - assert_eq!(val, &Some(b"val0".to_vec())); - val.as_mut().unwrap().extend_from_slice(b"-modified"); + // non existing value -> init value should be returned + changeset.append_storage(b"key2".to_vec(), b"-modified".to_vec().encode(), init, Some(2)); + // existing value should be reuse on append + changeset.append_storage(b"key0".to_vec(), b"-modified".to_vec().encode(), init, Some(10)); // should work for deleted keys - let val = changeset.modify(b"key1".to_vec(), init, Some(20)); - assert_eq!(val, &None); - *val = Some(b"deleted-modified".to_vec()); + changeset.append_storage( + b"key1".to_vec(), + b"deleted-modified".to_vec().encode(), + init, + Some(20), + ); + let val0_2 = vec![b"val0".to_vec(), b"-modified".to_vec()].encode(); + let val3_2 = vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec()].encode(); + let val1 = vec![b"deleted-modified".to_vec()].encode(); + let all_changes: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_2.as_slice()), vec![3, 15])), + ]; + assert_changes(&mut changeset, &all_changes); + + changeset.start_transaction(); + let val3_3 = + vec![b"valinit".to_vec(), b"-modified".to_vec(), b"-twice".to_vec(), b"-2".to_vec()] + .encode(); + changeset.append_storage(b"key3".to_vec(), b"-2".to_vec().encode(), init, Some(21)); + let all_changes2: Changes = vec![ + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_3.as_slice()), 
vec![3, 15, 21])), + ]; + assert_changes(&mut changeset, &all_changes2); + changeset.rollback_transaction().unwrap(); + assert_changes(&mut changeset, &all_changes); + changeset.start_transaction(); + let val3_4 = vec![ + b"valinit".to_vec(), + b"-modified".to_vec(), + b"-twice".to_vec(), + b"-thrice".to_vec(), + ] + .encode(); + changeset.append_storage(b"key3".to_vec(), b"-thrice".to_vec().encode(), init, Some(25)); let all_changes: Changes = vec![ - (b"key0", (Some(b"val0-modified"), vec![0, 10])), - (b"key1", (Some(b"deleted-modified"), vec![1, 20])), - (b"key2", (Some(b"valinit-modified"), vec![2])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key0", (Some(val0_2.as_slice()), vec![0, 10])), + (b"key1", (Some(val1.as_slice()), vec![1, 20])), + (b"key2", (Some(val3.as_slice()), vec![2])), + (b"key3", (Some(val3_4.as_slice()), vec![3, 15, 25])), ]; - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); + changeset.commit_transaction().unwrap(); changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 1); - assert_changes(&changeset, &all_changes); + assert_changes(&mut changeset, &all_changes); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); let rolled_back: Changes = vec![ - (b"key0", (Some(b"val0"), vec![0])), + (b"key0", (Some(val0.as_slice()), vec![0])), (b"key1", (None, vec![1])), - (b"key3", (Some(b"valinit-modified"), vec![3])), + (b"key3", (Some(val3.as_slice()), vec![3])), ]; - assert_changes(&changeset, &rolled_back); + assert_changes(&mut changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); } @@ -695,7 +1199,7 @@ mod test { changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (None, vec![3, 5])), (b"del2", (None, vec![4, 5])), @@ -707,7 +1211,7 @@ mod test { changeset.rollback_transaction().unwrap(); assert_changes( - &changeset, + &mut changeset, &vec![ (b"del1", (Some(b"delval1"), vec![3])), (b"del2", (Some(b"delval2"), vec![4])), @@ -850,4 +1354,72 @@ mod test { assert_eq!(changeset.exit_runtime(), Ok(())); assert_eq!(changeset.exit_runtime(), Err(NotInRuntime)); } + + #[test] + fn restore_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = b"akey".into(); + + let from = 50; // 1 byte len + let to = 100; // 2 byte len + for i in 0..from { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_from_len = Compact(from as u32).encode(); + assert_eq!(encoded_from_len.len(), 1); + assert!(encoded.starts_with(&encoded_from_len[..])); + let encoded_from = encoded.clone(); + + changeset.start_transaction(); + + for i in from..to { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // materialized + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(to as u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&encoded_from, encoded); + } + + /// First we have some `Set` operation with a valid SCALE list. Then we append data and rollback + /// afterwards. 
+ #[test] + fn restore_initial_set_after_append_to_parent() { + use codec::{Compact, Encode}; + let mut changeset = OverlayedChangeSet::default(); + let key: Vec = b"akey".into(); + + let initial_data = vec![1u8; 50].encode(); + + changeset.set(key.clone(), Some(initial_data.clone()), None); + + changeset.start_transaction(); + + // Append until we require 2 bytes for the length prefix. + for i in 0..50 { + changeset.append_storage(key.clone(), vec![i], Default::default, None); + } + + // Materialize the value. + let encoded = changeset.get(&key).unwrap().value().unwrap(); + let encoded_to_len = Compact(100u32).encode(); + assert_eq!(encoded_to_len.len(), 2); + assert!(encoded.starts_with(&encoded_to_len[..])); + + changeset.rollback_transaction().unwrap(); + + let encoded = changeset.get(&key).unwrap().value().unwrap(); + assert_eq!(&initial_data, encoded); + } } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index d6fc404e84fb5256656c5849d8f52894f37bfa02..c2dc637bc71a70dbef3a55abf658dbd1dd889db5 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -289,7 +289,7 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. - pub fn storage(&self, key: &[u8]) -> Option> { + pub fn storage(&mut self, key: &[u8]) -> Option> { self.top.get(key).map(|x| { let value = x.value(); let size_read = value.map(|x| x.len() as u64).unwrap_or(0); @@ -304,30 +304,11 @@ impl OverlayedChanges { self.storage_transaction_cache = None; } - /// Returns mutable reference to current value. - /// If there is no value in the overlay, the given callback is used to initiate the value. - /// Warning this function registers a change, so the mutable reference MUST be modified. - /// - /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn value_mut_or_insert_with( - &mut self, - key: &[u8], - init: impl Fn() -> StorageValue, - ) -> &mut StorageValue { - self.mark_dirty(); - - let value = self.top.modify(key.to_vec(), init, self.extrinsic_index()); - - // if the value was deleted initialise it back with an empty vec - value.get_or_insert_with(StorageValue::default) - } - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. 
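// A minimal sketch (not from this patch) of how the double-Option returned by `storage` /
// `child_storage` below is meant to be consumed by a caller that also has backend access;
// the `resolve` helper name and the plain `Vec<u8>` values are illustrative only.
fn resolve(overlay: Option<Option<Vec<u8>>>, backend: Option<Vec<u8>>) -> Option<Vec<u8>> {
    match overlay {
        // Key not touched by the overlay: the query should be referred to the backend.
        None => backend,
        // Key deleted in the overlay.
        Some(None) => None,
        // Some(Some(value)): the overlay holds the current value.
        Some(value) => value,
    }
}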
- pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> {
- let map = self.children.get(child_info.storage_key())?;
+ pub fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> {
+ let map = self.children.get_mut(child_info.storage_key())?;
 let value = map.0.get(key)?.value();
 let size_read = value.map(|x| x.len() as u64).unwrap_or(0);
 self.stats.tally_read_modified(size_read);
@@ -342,7 +323,21 @@ impl OverlayedChanges {
 let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0);
 self.stats.tally_write_overlay(size_write);
- self.top.set(key, val, self.extrinsic_index());
+ let extrinsic_index = self.extrinsic_index();
+ self.top.set(key, val, extrinsic_index);
+ }
+
+ /// Append an element to storage, initializing it with the existing value on the first write.
+ pub fn append_storage(
+ &mut self,
+ key: StorageKey,
+ element: StorageValue,
+ init: impl Fn() -> StorageValue,
+ ) {
+ let extrinsic_index = self.extrinsic_index();
+ let size_write = element.len() as u64;
+ self.stats.tally_write_overlay(size_write);
+ self.top.append_storage(key, element, init, extrinsic_index);
 }
 /// Set a new value for the specified key and child.
@@ -396,7 +391,8 @@ impl OverlayedChanges {
 pub fn clear_prefix(&mut self, prefix: &[u8]) -> u32 {
 self.mark_dirty();
- self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index())
+ let extrinsic_index = self.extrinsic_index();
+ self.top.clear_where(|key, _| key.starts_with(prefix), extrinsic_index)
 }
 /// Removes all key-value pairs which keys share the given prefix.
@@ -457,7 +453,7 @@ impl OverlayedChanges {
 });
 self.offchain
 .overlay_mut()
- .rollback_transaction()
+ .rollback_transaction_offchain()
 .expect("Top and offchain changesets are started in lockstep; qed");
 Ok(())
 }
@@ -475,7 +471,7 @@ impl OverlayedChanges {
 }
 self.offchain
 .overlay_mut()
- .commit_transaction()
+ .commit_transaction_offchain()
 .expect("Top and offchain changesets are started in lockstep; qed");
 Ok(())
 }
@@ -511,7 +507,7 @@ impl OverlayedChanges {
 }
 self.offchain
 .overlay_mut()
- .exit_runtime()
+ .exit_runtime_offchain()
 .expect("Top and offchain changesets are started in lockstep; qed");
 Ok(())
 }
@@ -535,11 +531,24 @@ impl OverlayedChanges {
 self.children.values().map(|v| (v.0.changes(), &v.1))
 }
+ /// Get an iterator over all child changes as seen by the current transaction.
+ pub fn children_mut(
+ &mut self,
+ ) -> impl Iterator, &ChildInfo)>
+ {
+ self.children.values_mut().map(|v| (v.0.changes_mut(), &v.1))
+ }
+
 /// Get an iterator over all top changes as been by the current transaction.
 pub fn changes(&self) -> impl Iterator {
 self.top.changes()
 }
+ /// Get an iterator over all top changes as seen by the current transaction.
+ pub fn changes_mut(&mut self) -> impl Iterator {
+ self.top.changes_mut()
+ }
+
 /// Get an optional iterator over all child changes stored under the supplied key.
 pub fn child_changes(
 &self,
@@ -548,6 +557,16 @@ impl OverlayedChanges {
 self.children.get(key).map(|(overlay, info)| (overlay.changes(), info))
 }
+ /// Get an optional iterator over all child changes stored under the supplied key.
+ pub fn child_changes_mut(
+ &mut self,
+ key: &[u8],
+ ) -> Option<(impl Iterator, &ChildInfo)> {
+ self.children
+ .get_mut(key)
+ .map(|(overlay, info)| (overlay.changes_mut(), &*info))
+ }
+
 /// Get an list of all index operations.
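// A minimal usage sketch (not from this patch) of the `append_storage` entry point added
// above, which replaces the removed `value_mut_or_insert_with`. The `Blake2Hasher` type
// parameter and the "events" key are illustrative assumptions only.
use codec::Encode;

fn push_event(overlay: &mut OverlayedChanges<sp_core::Blake2Hasher>, event: &[u8]) {
    overlay.append_storage(
        b"events".to_vec(),
        event.encode(),                    // the SCALE-encoded element to append
        || Vec::<Vec<u8>>::new().encode(), // init: an empty SCALE list on the first write
    );
}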
pub fn transaction_index_ops(&self) -> &[IndexOperation] { &self.transaction_index_ops @@ -575,11 +594,12 @@ impl OverlayedChanges { }; use core::mem::take; - let main_storage_changes = take(&mut self.top).drain_committed(); - let child_storage_changes = take(&mut self.children) - .into_iter() - .map(|(key, (val, info))| (key, (val.drain_committed(), info))); - + let main_storage_changes = + take(&mut self.top).drain_committed().map(|(k, v)| (k, v.to_option())); + let child_storage_changes = + take(&mut self.children).into_iter().map(|(key, (val, info))| { + (key, (val.drain_committed().map(|(k, v)| (k, v.to_option())), info)) + }); let offchain_storage_changes = self.offchain_drain_committed().collect(); #[cfg(feature = "std")] @@ -610,7 +630,7 @@ impl OverlayedChanges { /// set this index before first and unset after last extrinsic is executed. /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. - fn extrinsic_index(&self) -> Option { + fn extrinsic_index(&mut self) -> Option { self.collect_extrinsics.then(|| { self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) @@ -634,10 +654,12 @@ impl OverlayedChanges { return (cache.transaction_storage_root, true) } - let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); - let child_delta = self.children().map(|(changes, info)| { - (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) - }); + let delta = self.top.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); + + let child_delta = self + .children + .values_mut() + .map(|v| (&v.1, v.0.changes_mut().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))); let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); @@ -677,7 +699,7 @@ impl OverlayedChanges { return Ok((root, true)) } - let root = if let Some((changes, info)) = self.child_changes(storage_key) { + let root = if let Some((changes, info)) = self.child_changes_mut(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); Some(backend.child_storage_root(info, delta, state_version)) } else { @@ -711,19 +733,19 @@ impl OverlayedChanges { /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value. - pub fn iter_after(&self, key: &[u8]) -> impl Iterator { + pub fn iter_after(&mut self, key: &[u8]) -> impl Iterator { self.top.changes_after(key) } /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value for the given `storage_key` child. 
pub fn child_iter_after( - &self, + &mut self, storage_key: &[u8], key: &[u8], - ) -> impl Iterator { + ) -> impl Iterator { self.children - .get(storage_key) + .get_mut(storage_key) .map(|(overlay, _)| overlay.changes_after(key)) .into_iter() .flatten() @@ -858,7 +880,11 @@ mod tests { use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; - fn assert_extrinsics(overlay: &OverlayedChangeSet, key: impl AsRef<[u8]>, expected: Vec) { + fn assert_extrinsics( + overlay: &mut OverlayedChangeSet, + key: impl AsRef<[u8]>, + expected: Vec, + ) { assert_eq!( overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected @@ -1049,9 +1075,9 @@ mod tests { overlay.set_extrinsic_index(2); overlay.set_storage(vec![1], Some(vec![6])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); overlay.start_transaction(); @@ -1061,15 +1087,15 @@ mod tests { overlay.set_extrinsic_index(4); overlay.set_storage(vec![1], Some(vec![8])); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2, 4]); - assert_extrinsics(&overlay.top, vec![3], vec![1, 3]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2, 4]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1, 3]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); overlay.rollback_transaction().unwrap(); - assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); - assert_extrinsics(&overlay.top, vec![3], vec![1]); - assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + assert_extrinsics(&mut overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&mut overlay.top, vec![3], vec![1]); + assert_extrinsics(&mut overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs index 1e6965e874759e30b415783a560192af8412027d..517a51b02693c0528cf9475972a259ab72fd5f59 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -42,7 +42,7 @@ impl OffchainOverlayedChanges { } /// Iterate over all key value pairs by reference. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&mut self) -> impl Iterator { self.0.changes().map(|kv| (kv.0, kv.1.value_ref())) } @@ -53,14 +53,16 @@ impl OffchainOverlayedChanges { /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let _ = self - .0 - .set((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove, None); + let _ = self.0.set_offchain( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::Remove, + None, + ); } /// Set the value associated with a key under a prefix to the value provided. pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let _ = self.0.set( + let _ = self.0.set_offchain( (prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::SetValue(value.to_vec()), None, @@ -68,7 +70,7 @@ impl OffchainOverlayedChanges { } /// Obtain a associated value to the given key in storage with prefix. 
- pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { + pub fn get(&mut self, prefix: &[u8], key: &[u8]) -> Option { let key = (prefix.to_vec(), key.to_vec()); self.0.get(&key).map(|entry| entry.value_ref()).cloned() } diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs index 2056bf9866358d4d5086fbdf67a7385e7532092f..b78d17138b0ff5c968a858ab7515b5d82a9673c3 100644 --- a/substrate/primitives/state-machine/src/read_only.rs +++ b/substrate/primitives/state-machine/src/read_only.rs @@ -88,39 +88,39 @@ where panic!("Should not be used in read-only externalities!") } - fn storage(&self, key: &[u8]) -> Option { + fn storage(&mut self, key: &[u8]) -> Option { self.backend .storage(key) .expect("Backed failed for storage in ReadOnlyExternalities") } - fn storage_hash(&self, key: &[u8]) -> Option> { + fn storage_hash(&mut self, key: &[u8]) -> Option> { self.backend .storage_hash(key) .expect("Backed failed for storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend .child_storage(child_info, key) .expect("Backed failed for child_storage in ReadOnlyExternalities") } - fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + fn child_storage_hash(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.backend .child_storage_hash(child_info, key) .expect("Backed failed for child_storage_hash in ReadOnlyExternalities") .map(|h| h.encode()) } - fn next_storage_key(&self, key: &[u8]) -> Option { + fn next_storage_key(&mut self, key: &[u8]) -> Option { self.backend .next_storage_key(key) .expect("Backed failed for next_storage_key in ReadOnlyExternalities") } - fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + fn next_child_storage_key(&mut self, child_info: &ChildInfo, key: &[u8]) -> Option { self.backend .next_child_storage_key(child_info, key) .expect("Backed failed for next_child_storage_key in ReadOnlyExternalities") diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index e19ba95755c1b16f5cc6a2d59a8ede48997707f0..e9d64a891e819094d8a2c4d49265d01d6b7fc965 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -209,12 +209,15 @@ where /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. 
- pub fn as_backend(&self) -> InMemoryBackend { - let top: Vec<_> = - self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); + pub fn as_backend(&mut self) -> InMemoryBackend { + let top: Vec<_> = self + .overlay + .changes_mut() + .map(|(k, v)| (k.clone(), v.value().cloned())) + .collect(); let mut transaction = vec![(None, top)]; - for (child_changes, child_info) in self.overlay.children() { + for (child_changes, child_info) in self.overlay.children_mut() { transaction.push(( Some(child_info.clone()), child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), @@ -293,13 +296,14 @@ where } } -impl PartialEq for TestExternalities +impl TestExternalities where + H: Hasher, H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contains the /// same data at this state - fn eq(&self, other: &TestExternalities) -> bool { + pub fn eq(&mut self, other: &mut TestExternalities) -> bool { self.as_backend().eq(&other.as_backend()) } } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index bb893b25dc443f617ab5c0dd3d0d22c94646b689..c6f8491367c3009b4e494e1a340c3d2f3808f94b 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-api = { path = "../api", default-features = false } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } # ECIES dependencies -ed25519-dalek = { version = "2.1", optional = true } -x25519-dalek = { version = "2.0", optional = true, features = ["static_secrets"] } -curve25519-dalek = { version = "4.1.1", optional = true } -aes-gcm = { version = "0.10", optional = true } -hkdf = { version = "0.12.0", optional = true } -sha2 = { version = "0.10.7", optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +ed25519-dalek = { optional = true, workspace = true, default-features = true } +x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } +curve25519-dalek = { optional = true, workspace = true } +aes-gcm = { optional = true, workspace = true } +hkdf = { optional = true, workspace = true } +sha2 = { optional = true, workspace = true, default-features = true } +rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git 
a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index c3318943d0d481a430da46502cdcb3a3c8aa22a7..3184ec010930ae7321ee4dc183844b6c0ab91839 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-serde = { version = "0.4.0", optional = true, default-features = false } -ref-cast = "1.0.0" +codec = { features = ["derive"], workspace = true } +impl-serde = { optional = true, workspace = true } +ref-cast = { workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-debug-derive = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index 197994f574719ec0b000b8dbd79f333c71291708..3b9afae4ca078d1e69f5e33e9b3cfeee76b4a328 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -293,7 +293,7 @@ impl ChildInfo { } } - /// Return a the full location in the direct parent of + /// Return the full location in the direct parent of /// this trie. pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { @@ -302,7 +302,7 @@ impl ChildInfo { } } - /// Returns a the full location in the direct parent of + /// Returns the full location in the direct parent of /// this trie. pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index b7be614860910eb63f12eadaa3676f69b0e65ada..e223e8937653c0449e27e5b0e34a94fa03711982 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index c1bf9b3255eab83dea9eb9f80b93aa5440e816e1..7dccf741cd068c299a539c6a20550e4ebe4e9006 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } thiserror = { optional = true, workspace = true } 
-sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml index 8adec1670dc2da99c315f58dbc37a92a3cf947f5..c434016604ac9eff0752d3924ce01b67acba15c0 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -21,11 +21,11 @@ features = ["with-tracing"] targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = [ +codec = { features = [ "derive", -] } -tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.32", default-features = false } +], workspace = true } +tracing = { workspace = true } +tracing-core = { workspace = true } tracing-subscriber = { workspace = true, optional = true, features = [ "env-filter", "tracing-log", diff --git a/substrate/primitives/transaction-pool/Cargo.toml b/substrate/primitives/transaction-pool/Cargo.toml index a7deda64efce46f57e84826fbef8ce76598ab73b..964fb18b533eeb5a28fe3b6f0cafba5150528ec4 100644 --- a/substrate/primitives/transaction-pool/Cargo.toml +++ b/substrate/primitives/transaction-pool/Cargo.toml @@ -17,8 +17,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 1e874c3595acd4723e34c0cb8568256ae2444317..f12b9ef118cbaa7447e9d8405d654312bc72bcc6 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true, default-features = false } -sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-trie = { path = "../trie", optional = true, default-features = false } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { optional = true, workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-trie = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 45459c180d40d076b0d7b59232e9dc76cd0b2d27..1fe29f72014aa8ca078a33aa5e936a3308b20c2e 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -21,29 +21,29 @@ name = "bench" harness = false [dependencies] -ahash = { version = "0.8.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -hash-db 
= { version = "0.16.0", default-features = false } -lazy_static = { version = "1.4.0", optional = true } -memory-db = { version = "0.32.0", default-features = false } -nohash-hasher = { version = "0.2.0", optional = true } -parking_lot = { version = "0.12.1", optional = true } -rand = { version = "0.8", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +ahash = { optional = true, workspace = true } +codec = { workspace = true } +hash-db = { workspace = true } +lazy_static = { optional = true, workspace = true } +memory-db = { workspace = true } +nohash-hasher = { optional = true, workspace = true } +parking_lot = { optional = true, workspace = true, default-features = true } +rand = { optional = true, workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } thiserror = { optional = true, workspace = true } -tracing = { version = "0.1.29", optional = true } -trie-db = { version = "0.29.0", default-features = false } -trie-root = { version = "0.18.0", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } -schnellru = { version = "0.2.1", optional = true } +tracing = { optional = true, workspace = true, default-features = true } +trie-db = { workspace = true } +trie-root = { workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +schnellru = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -criterion = "0.5.1" -trie-bench = "0.39.0" -trie-standardmap = "0.16.0" -sp-runtime = { path = "../runtime" } +array-bytes = { workspace = true, default-features = true } +criterion = { workspace = true, default-features = true } +trie-bench = { workspace = true } +trie-standardmap = { workspace = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/src/accessed_nodes_tracker.rs b/substrate/primitives/trie/src/accessed_nodes_tracker.rs new file mode 100644 index 0000000000000000000000000000000000000000..378e3c2812c06fe7530c391e06b390e8bec0c95f --- /dev/null +++ b/substrate/primitives/trie/src/accessed_nodes_tracker.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helpers for checking for duplicate nodes. + +use alloc::collections::BTreeSet; +use core::hash::Hash; +use scale_info::TypeInfo; +use sp_core::{Decode, Encode}; +use trie_db::{RecordedForKey, TrieAccess, TrieRecorder}; + +/// Error associated with the `AccessedNodesTracker` module. +#[derive(Encode, Decode, Clone, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// The proof contains unused nodes. + UnusedNodes, +} + +/// Helper struct used to ensure that a storage proof doesn't contain duplicate or unused nodes. 
+///
+/// The struct needs to be used as a `TrieRecorder` and `ensure_no_unused_nodes()` has to be called
+/// to actually perform the check.
+pub struct AccessedNodesTracker<H: Ord> {
+	proof_nodes_count: usize,
+	recorder: BTreeSet<H>,
+}
+
+impl<H: Ord> AccessedNodesTracker<H> {
+	/// Create a new instance of `AccessedNodesTracker`, starting from a `RawStorageProof`.
+	pub fn new(proof_nodes_count: usize) -> Self {
+		Self { proof_nodes_count, recorder: BTreeSet::new() }
+	}
+
+	/// Ensure that all the nodes in the proof have been accessed.
+	pub fn ensure_no_unused_nodes(self) -> Result<(), Error> {
+		if self.proof_nodes_count != self.recorder.len() {
+			return Err(Error::UnusedNodes)
+		}
+
+		Ok(())
+	}
+}
+
+impl<H: Hash + Ord> TrieRecorder<H> for AccessedNodesTracker<H> {
+	fn record(&mut self, access: TrieAccess<H>) {
+		match access {
+			TrieAccess::NodeOwned { hash, .. } |
+			TrieAccess::EncodedNode { hash, .. } |
+			TrieAccess::Value { hash, .. } => {
+				self.recorder.insert(hash);
+			},
+			_ => {},
+		}
+	}
+
+	fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> RecordedForKey {
+		RecordedForKey::None
+	}
+}
+
+#[cfg(test)]
+pub mod tests {
+	use super::*;
+	use crate::{tests::create_storage_proof, StorageProof};
+	use hash_db::Hasher;
+	use trie_db::{Trie, TrieDBBuilder};
+
+	type Hash = <sp_core::Blake2Hasher as Hasher>::Out;
+	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
+
+	const TEST_DATA: &[(&[u8], &[u8])] =
+		&[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64])];
+
+	#[test]
+	fn proof_with_unused_nodes_is_rejected() {
+		let (raw_proof, root) = create_storage_proof::<Layout>(TEST_DATA);
+		let proof = StorageProof::new(raw_proof.clone());
+		let proof_nodes_count = proof.len();
+
+		let mut accessed_nodes_tracker = AccessedNodesTracker::<Hash>::new(proof_nodes_count);
+		{
+			let db = proof.clone().into_memory_db();
+			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
+				.with_recorder(&mut accessed_nodes_tracker)
+				.build();
+
+			trie.get(b"key1").unwrap().unwrap();
+			trie.get(b"key2").unwrap().unwrap();
+			trie.get(b"key3").unwrap().unwrap();
+		}
+		assert_eq!(accessed_nodes_tracker.ensure_no_unused_nodes(), Ok(()));
+
+		let mut accessed_nodes_tracker = AccessedNodesTracker::<Hash>::new(proof_nodes_count);
+		{
+			let db = proof.into_memory_db();
+			let trie = TrieDBBuilder::<Layout>::new(&db, &root)
+				.with_recorder(&mut accessed_nodes_tracker)
+				.build();
+
+			trie.get(b"key1").unwrap().unwrap();
+			trie.get(b"key2").unwrap().unwrap();
+		}
+		assert_eq!(accessed_nodes_tracker.ensure_no_unused_nodes(), Err(Error::UnusedNodes));
+	}
+}
diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs
index 54f202eda0c9ac3d3991c094b3f406f8d78d4c4d..ef6b6a5743c2bbc6f3606a5dd645ff0e47c8a360 100644
--- a/substrate/primitives/trie/src/lib.rs
+++ b/substrate/primitives/trie/src/lib.rs
@@ -21,6 +21,7 @@ extern crate alloc;
+pub mod accessed_nodes_tracker;
 #[cfg(feature = "std")]
 pub mod cache;
 mod error;
@@ -28,6 +29,7 @@ mod node_codec;
 mod node_header;
 #[cfg(feature = "std")]
 pub mod recorder;
+pub mod recorder_ext;
 mod storage_proof;
 mod trie_codec;
 mod trie_stream;
@@ -46,7 +48,7 @@ use hash_db::{Hasher, Prefix};
 pub use memory_db::{prefixed_key, HashKey, KeyFunction, PrefixedKey};
 /// The Substrate format implementation of `NodeCodec`.
 pub use node_codec::NodeCodec;
-pub use storage_proof::{CompactProof, StorageProof};
+pub use storage_proof::{CompactProof, StorageProof, StorageProofError};
 /// Trie codec reexport, mainly child trie support
 /// for trie compact proof.
 pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError};
@@ -64,6 +66,9 @@ pub use trie_db::{proof::VerifyError, MerkleValue};
 /// The Substrate format implementation of `TrieStream`.
 pub use trie_stream::TrieStream;
+/// Raw storage proof type (just raw trie nodes).
+pub type RawStorageProof = Vec<Vec<u8>>;
+
 /// substrate trie layout
 pub struct LayoutV0<H>(PhantomData<H>);
@@ -195,11 +200,11 @@ pub type MemoryDB<H> = memory_db::MemoryDB<H, memory_db::HashKey<H>, trie_db::DBValue>;
 /// Reexport from `hash_db`, with genericity set for `Hasher` trait.
 pub type GenericMemoryDB<H, KF> = memory_db::MemoryDB<H, KF, trie_db::DBValue>;
-/// Persistent trie database read-access interface for the a given hasher.
+/// Persistent trie database read-access interface for a given hasher.
 pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>;
 /// Builder for creating a [`TrieDB`].
 pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>;
-/// Persistent trie database write-access interface for the a given hasher.
+/// Persistent trie database write-access interface for a given hasher.
 pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>;
 /// Builder for creating a [`TrieDBMut`].
 pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>;
@@ -212,17 +217,17 @@ pub type TrieHash<L> = <<L as TrieLayout>::Hash as Hasher>::Out;
 pub mod trie_types {
 	use super::*;
-	/// Persistent trie database read-access interface for the a given hasher.
+	/// Persistent trie database read-access interface for a given hasher.
 	///
 	/// Read only V1 and V0 are compatible, thus we always use V1.
 	pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1<H>>;
 	/// Builder for creating a [`TrieDB`].
 	pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1<H>>;
-	/// Persistent trie database write-access interface for the a given hasher.
+	/// Persistent trie database write-access interface for a given hasher.
 	pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0<H>>;
 	/// Builder for creating a [`TrieDBMutV0`].
 	pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0<H>>;
-	/// Persistent trie database write-access interface for the a given hasher.
+	/// Persistent trie database write-access interface for a given hasher.
 	pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1<H>>;
 	/// Builder for creating a [`TrieDBMutV1`].
 	pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1<H>>;
@@ -616,6 +621,50 @@ mod tests {
 	type MemoryDBMeta<H> = memory_db::MemoryDB<H, memory_db::HashKey<H>, trie_db::DBValue>;
+	pub fn create_trie<L: TrieConfiguration>(
+		data: &[(&[u8], &[u8])],
+	) -> (MemoryDB<L::Hash>, trie_db::TrieHash<L>) {
+		let mut db = MemoryDB::default();
+		let mut root = Default::default();
+
+		{
+			let mut trie = trie_db::TrieDBMutBuilder::<L>::new(&mut db, &mut root).build();
+			for (k, v) in data {
+				trie.insert(k, v).expect("Inserts data");
+			}
+		}
+
+		let mut recorder = Recorder::<L>::new();
+		{
+			let trie = trie_db::TrieDBBuilder::<L>::new(&mut db, &mut root)
+				.with_recorder(&mut recorder)
+				.build();
+			for (k, _v) in data {
+				trie.get(k).unwrap();
+			}
+		}
+
+		(db, root)
+	}
+
+	pub fn create_storage_proof<L: TrieConfiguration>(
+		data: &[(&[u8], &[u8])],
+	) -> (RawStorageProof, trie_db::TrieHash<L>) {
+		let (db, root) = create_trie::<L>(data);
+
+		let mut recorder = Recorder::<L>::new();
+		{
+			let trie = trie_db::TrieDBBuilder::<L>::new(&db, &root)
+				.with_recorder(&mut recorder)
+				.build();
+			for (k, _v) in data {
+				trie.get(k).unwrap();
+			}
+		}
+
+		(recorder.drain().into_iter().map(|record| record.data).collect(), root)
+	}
+
 	fn hashed_null_node<T: TrieConfiguration>() -> TrieHash<T> {
 		<T::Codec as NodeCodecT>::hashed_null_node()
 	}
diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs
index 22a22b33b370994d554415519b4ee42fd82ae891..2886577eddc60a3a902321f312acdeb9a5262965 100644
--- a/substrate/primitives/trie/src/recorder.rs
+++ b/substrate/primitives/trie/src/recorder.rs
@@ -145,7 +145,7 @@ impl<H: Hasher> Recorder<H> {
 	/// Convert the recording to a [`StorageProof`].
 	///
-	/// In contrast to [`Self::drain_storage_proof`] this doesn't consumes and doesn't clears the
+	/// In contrast to [`Self::drain_storage_proof`] this doesn't consume and doesn't clear the
 	/// recordings.
 	///
 	/// Returns the [`StorageProof`].
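As an aside (not part of the patch itself): the two checks introduced above are meant to be combined by proof verifiers. A rough, illustrative sketch of how a caller might wire them together, assuming a `Blake2Hasher`-based `LayoutV1`, a known state `root`, and the set of `keys` the proof is expected to cover (the helper name and the coarse `Result<(), ()>` error handling are made up for the example):

use sp_core::{Blake2Hasher, H256};
use sp_trie::{
	accessed_nodes_tracker::AccessedNodesTracker, LayoutV1, RawStorageProof, StorageProof,
	TrieDBBuilder,
};
use trie_db::Trie;

type Layout = LayoutV1<Blake2Hasher>;

/// Illustrative only: reject proofs with duplicate or unused nodes while reading `keys`.
fn check_minimal_proof(raw_proof: RawStorageProof, root: H256, keys: &[Vec<u8>]) -> Result<(), ()> {
	// `new_with_duplicate_nodes_check` fails if the same encoded node appears twice.
	let proof = StorageProof::new_with_duplicate_nodes_check(raw_proof).map_err(drop)?;
	let mut tracker = AccessedNodesTracker::<H256>::new(proof.len());
	{
		let db = proof.into_memory_db::<Blake2Hasher>();
		let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_recorder(&mut tracker).build();
		for key in keys {
			// Every key the verifier cares about must be resolvable from the proof.
			trie.get(key).map_err(drop)?;
		}
	}
	// Fails if the proof carries nodes that none of the reads above touched.
	tracker.ensure_no_unused_nodes().map_err(drop)
}

Rejecting both duplicate and unused nodes keeps proofs minimal and their size accounting honest.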
@@ -429,7 +429,8 @@ impl<'a, H: Hasher> trie_db::TrieRecorder<H::Out> for TrieRecorder<'a, H> {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder};
+	use crate::tests::create_trie;
+	use trie_db::{Trie, TrieDBBuilder, TrieRecorder};
 	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
 	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
@@ -438,23 +439,9 @@ mod tests {
 	const TEST_DATA: &[(&[u8], &[u8])] =
 		&[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])];
-	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
-		let mut db = MemoryDB::default();
-		let mut root = Default::default();
-
-		{
-			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
-			for (k, v) in TEST_DATA {
-				trie.insert(k, v).expect("Inserts data");
-			}
-		}
-
-		(db, root)
-	}
-
 	#[test]
 	fn recorder_works() {
-		let (db, root) = create_trie();
+		let (db, root) = create_trie::<Layout>(TEST_DATA);
 		let recorder = Recorder::default();
@@ -498,7 +485,7 @@ mod tests {
 	#[test]
 	fn recorder_transactions_rollback_work() {
-		let (db, root) = create_trie();
+		let (db, root) = create_trie::<Layout>(TEST_DATA);
 		let recorder = Recorder::default();
 		let mut stats = vec![RecorderStats::default()];
@@ -547,7 +534,7 @@ mod tests {
 	#[test]
 	fn recorder_transactions_commit_work() {
-		let (db, root) = create_trie();
+		let (db, root) = create_trie::<Layout>(TEST_DATA);
 		let recorder = Recorder::default();
@@ -586,7 +573,7 @@ mod tests {
 	#[test]
 	fn recorder_transactions_commit_and_rollback_work() {
-		let (db, root) = create_trie();
+		let (db, root) = create_trie::<Layout>(TEST_DATA);
 		let recorder = Recorder::default();
@@ -645,7 +632,7 @@ mod tests {
 	#[test]
 	fn recorder_transaction_accessed_keys_works() {
 		let key = TEST_DATA[0].0;
-		let (db, root) = create_trie();
+		let (db, root) = create_trie::<Layout>(TEST_DATA);
 		let recorder = Recorder::default();
diff --git a/substrate/primitives/trie/src/recorder_ext.rs b/substrate/primitives/trie/src/recorder_ext.rs
new file mode 100644
index 0000000000000000000000000000000000000000..866d5b72c5d64f4fb28178645ea9425158bf0ffc
--- /dev/null
+++ b/substrate/primitives/trie/src/recorder_ext.rs
@@ -0,0 +1,47 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Extension for the default recorder.
+
+use crate::RawStorageProof;
+use alloc::{collections::BTreeSet, vec::Vec};
+use trie_db::{Recorder, TrieLayout};
+
+/// Convenience extension for the `Recorder` struct.
+///
+/// Used to deduplicate some logic.
+pub trait RecorderExt
+where
+	Self: Sized,
+{
+	/// Convert the recorder into a `BTreeSet`.
+	fn into_set(self) -> BTreeSet<Vec<u8>>;
+
+	/// Convert the recorder into a `RawStorageProof`, avoiding duplicate nodes.
+	fn into_raw_storage_proof(self) -> RawStorageProof {
+		// The recorder may record the same trie node multiple times,
+		// and we don't want duplicate nodes in our proofs
+		// => let's deduplicate it by collecting to a BTreeSet first
+		self.into_set().into_iter().collect()
+	}
+}
+
+impl<L: TrieLayout> RecorderExt for Recorder<L> {
+	fn into_set(mut self) -> BTreeSet<Vec<u8>> {
+		self.drain().into_iter().map(|record| record.data).collect::<BTreeSet<_>>()
+	}
+}
diff --git a/substrate/primitives/trie/src/storage_proof.rs b/substrate/primitives/trie/src/storage_proof.rs
index e46c49be19cb84af46f63003abcdb56ca421d9d0..a9f6298742f648953ecd662dd27fca2b63c6c29e 100644
--- a/substrate/primitives/trie/src/storage_proof.rs
+++ b/substrate/primitives/trie/src/storage_proof.rs
@@ -25,6 +25,13 @@ use scale_info::TypeInfo;
 // with `LayoutV0`.
 use crate::LayoutV1 as Layout;
+/// Error associated with the `storage_proof` module.
+#[derive(Encode, Decode, Clone, Eq, PartialEq, Debug, TypeInfo)]
+pub enum StorageProofError {
+	/// The proof contains duplicate nodes.
+	DuplicateNodes,
+}
+
 /// A proof that some set of key-value pairs are included in the storage trie. The proof contains
 /// the storage values so that the partial storage backend can be reconstructed by a verifier that
 /// does not already have access to the key-value pairs.
@@ -43,6 +50,22 @@ impl StorageProof {
 		StorageProof { trie_nodes: BTreeSet::from_iter(trie_nodes) }
 	}
+	/// Constructs a storage proof from a subset of encoded trie nodes in a storage backend.
+	///
+	/// Returns an error if the provided subset of encoded trie nodes contains duplicates.
+	pub fn new_with_duplicate_nodes_check(
+		trie_nodes: impl IntoIterator<Item = Vec<u8>>,
+	) -> Result<Self, StorageProofError> {
+		let mut trie_nodes_set = BTreeSet::new();
+		for node in trie_nodes {
+			if !trie_nodes_set.insert(node) {
+				return Err(StorageProofError::DuplicateNodes);
+			}
+		}
+
+		Ok(StorageProof { trie_nodes: trie_nodes_set })
+	}
+
 	/// Returns a new empty proof.
 	///
 	/// An empty proof is capable of only proving trivial statements (ie. that an empty set of
@@ -56,6 +79,11 @@ impl StorageProof {
 		self.trie_nodes.is_empty()
 	}
+	/// Returns the number of nodes in the proof.
+	pub fn len(&self) -> usize {
+		self.trie_nodes.len()
+	}
+
 	/// Convert into an iterator over encoded trie nodes in lexicographical order constructed
 	/// from the proof.
 	pub fn into_iter_nodes(self) -> impl Sized + DoubleEndedIterator<Item = Vec<u8>> {
@@ -198,3 +226,23 @@ impl CompactProof {
 		Ok((db, root))
 	}
 }
+
+#[cfg(test)]
+pub mod tests {
+	use super::*;
+	use crate::{tests::create_storage_proof, StorageProof};
+
+	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
+
+	const TEST_DATA: &[(&[u8], &[u8])] =
+		&[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key11", &[4; 64])];
+
+	#[test]
+	fn proof_with_duplicate_nodes_is_rejected() {
+		let (raw_proof, _root) = create_storage_proof::<Layout>(TEST_DATA);
+		assert!(matches!(
+			StorageProof::new_with_duplicate_nodes_check(raw_proof),
+			Err(StorageProofError::DuplicateNodes)
+		));
+	}
+}
diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml
index f8ef8f66c5355634dbb4a7e1f08adbdb2f035766..65a7c63f432ddb119897f26bd398d671f18ea70a 100644
--- a/substrate/primitives/version/Cargo.toml
+++ b/substrate/primitives/version/Cargo.toml
@@ -17,16 +17,16 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
-impl-serde = { version = "0.4.0", default-features = false, optional = true }
-parity-wasm = { version = "0.45", optional = true }
-scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
+codec = { features = ["derive"], workspace = true }
+impl-serde = { optional = true, workspace = true }
+parity-wasm = { optional = true, workspace = true }
+scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 thiserror = { optional = true, workspace = true }
-sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" }
-sp-runtime = { path = "../runtime", default-features = false }
-sp-std = { path = "../std", default-features = false }
-sp-version-proc-macro = { path = "proc-macro", default-features = false }
+sp-crypto-hashing-proc-macro = { workspace = true, default-features = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+sp-version-proc-macro = { workspace = true }
 [features]
 default = ["std"]
diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml
index 3abd5c0910694cf14a8885ecebf4cc855e9e8266..1feea15b9fcdb84a5608ae6cb8739e4d5e9e8e0c 100644
--- a/substrate/primitives/version/proc-macro/Cargo.toml
+++ b/substrate/primitives/version/proc-macro/Cargo.toml
@@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
-proc-macro2 = "1.0.56"
+codec = { features = ["derive"], workspace = true, default-features = true }
+proc-macro2 = { workspace = true }
 quote = { workspace = true }
 syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true }
 [dev-dependencies]
-sp-version = { path = ".."
} +sp-version = { workspace = true, default-features = true } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index a0c8342d2d3c5014527487faf187fc48fd5344e1..f7a1d25d16bf37827df65c74a89fe3bf2232c0fc 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { optional = true, workspace = true, default-features = true } -wasmtime = { version = "8.0.1", default-features = false, optional = true } -anyhow = { version = "1.0.81", optional = true } +wasmtime = { optional = true, workspace = true } +anyhow = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index d2d72a7cb019fa5ad5c8fd66a32e953f9076068f..c9f4f39d041c2f28b978578a9dd830ce7ae9ab68 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -16,14 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bounded-collections = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -smallvec = "1.11.0" -sp-arithmetic = { path = "../arithmetic", default-features = false } -sp-debug-derive = { path = "../debug-derive", default-features = false } -schemars = { version = "0.8.3", default-features = false, optional = true } +smallvec = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true } +sp-debug-derive = { workspace = true } +schemars = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 8e3e6138b9a8dabdbd67589049e5273305d29a0a..5ec665de05ea9337b5f6a8824b13fca7a4c710cb 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -14,11 +14,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -flate2 = "1.0" -fs_extra = "1.3" -glob = "0.3" -tar = "0.4" -tempfile = "3" -toml_edit = "0.19" -itertools = "0.11" +clap = { features = ["derive"], workspace = true } +flate2 = { workspace = true } +fs_extra = { workspace = true } +glob = { workspace = true } +tar = { workspace = true } +tempfile = { workspace = true } +toml_edit = { workspace = true } +itertools = { workspace = true } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 56b1c038199a81fb84c03474f0d7c5bbf5ab2c67..71aac02ba9b64260f50e0f0f832c9a85f6bc2f5b 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = 
["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -tokio = { version = "1.22.0", features = ["macros", "time"] } +futures = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } [dev-dependencies] -trybuild = { version = "1.0.88", features = ["diff"] } -sc-service = { path = "../client/service" } +trybuild = { features = ["diff"], workspace = true } +sc-service = { workspace = true, default-features = true } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 87c595c66f3484cbbaa8d8a6d83ea09453ee99b9..52642b9f62bb96277a29fdb91894c34cfd043f64 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-rpc-client = { path = "../../utils/frame/rpc/client" } -sp-rpc = { path = "../../primitives/rpc" } -assert_cmd = "2.0.10" -nix = { version = "0.28.0", features = ["signal"] } -regex = "1.7.3" -tokio = { version = "1.22.0", features = ["full"] } -node-primitives = { path = "../../bin/node/primitives" } -node-cli = { package = "staging-node-cli", path = "../../bin/node/cli" } -sc-cli = { path = "../../client/cli" } -sc-service = { path = "../../client/service" } -futures = "0.3.28" +substrate-rpc-client = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +regex = { workspace = true } +tokio = { features = ["full"], workspace = true, default-features = true } +node-primitives = { workspace = true, default-features = true } +node-cli = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +futures = { workspace = true } [features] try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 5871f1bf5b4d05ad35f0998e7b4ab9fb3b953d58..ecf04dac1a67350d72aebc07c7af8499ee4145a7 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +array-bytes = { workspace = true, default-features = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-client-api = { path = "../../client/api" } -sc-client-db = { path = "../../client/db", default-features = false, features = [ +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { features = [ "test-helpers", -] } -sc-consensus = { path = "../../client/consensus/common" } -sc-executor = { path = "../../client/executor" } -sc-offchain = { path = "../../client/offchain" } -sc-service = { path = "../../client/service", default-features = false, features = [ +], workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-service = { features = [ "test-helpers", -] } -sp-blockchain = { path = "../../primitives/blockchain" } 
-sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-keyring = { path = "../../primitives/keyring" } -sp-keystore = { path = "../../primitives/keystore" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } -tokio = { version = "1.22.0", features = ["sync"] } +], workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +tokio = { features = ["sync"], workspace = true, default-features = true } diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 73581a4f0efa31c195f3856a8894692a2f71bcfb..9dc4739eb795437cff17fae69e5a0181cf150510 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -153,7 +153,7 @@ where Self: BlockImport, RA: Send, B: Send + Sync, - E: Send, + E: Send + Sync, { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 8733ff9fcebb36199c341cd6d90a96623212944c..5c8f49a6db8590d1ff16771a0d27b4b2d8197d47 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -16,59 +16,59 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } -sp-block-builder = { path = "../../primitives/block-builder", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-keyring = { path = "../../primitives/keyring", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -frame-support = { path = "../../frame/support", default-features = false } -sp-version = { path = "../../primitives/version", default-features = false } -sp-session = { path = "../../primitives/session", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -pallet-babe = { path = "../../frame/babe", default-features = false } -pallet-balances 
= { path = "../../frame/balances", default-features = false } -frame-executive = { path = "../../frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../frame/metadata-hash-extension", default-features = false } -frame-system = { path = "../../frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } -pallet-timestamp = { path = "../../frame/timestamp", default-features = false } -sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-trie = { path = "../../primitives/trie", default-features = false } -sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false } -trie-db = { version = "0.29.0", default-features = false } -sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } -sp-externalities = { path = "../../primitives/externalities", default-features = false } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-aura = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +sp-genesis-builder = { workspace = true } +sp-block-builder = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-inherents = { workspace = true } +sp-keyring = { workspace = true } +sp-offchain = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-crypto-hashing = { workspace = true } +sp-io = { workspace = true } +frame-support = { workspace = true } +sp-version = { workspace = true } +sp-session = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-trie = { workspace = true } +sp-transaction-pool = { workspace = true } +trie-db = { workspace = true } +sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sp-state-machine = { workspace = true } +sp-externalities = { workspace = true } # 3rd party -array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } serde_json = { workspace = true, features = ["alloc"] } log = { workspace = true } -hex-literal = { version = "0.4.1" } +tracing = { workspace = true, default-features = false } [dev-dependencies] -futures = "0.3.30" -sc-block-builder = { path = "../../client/block-builder" } -sc-chain-spec = { path = "../../client/chain-spec" } -sc-executor = { path = "../../client/executor" } -sc-executor-common = { path = "../../client/executor/common" } -sp-consensus = { path = "../../primitives/consensus/common" } -substrate-test-runtime-client = { path = "client" } -sp-tracing = { path = "../../primitives/tracing" } +futures = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-executor = { 
workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-tracing = { workspace = true, default-features = true } serde = { features = ["alloc", "derive"], workspace = true } serde_json = { features = ["alloc"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true, features = ["metadata-hash"] } +substrate-wasm-builder = { optional = true, features = ["metadata-hash"], workspace = true, default-features = true } [features] default = ["std"] @@ -112,6 +112,7 @@ std = [ "sp-trie/std", "sp-version/std", "substrate-wasm-builder", + "tracing/std", "trie-db/std", ] diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index 5ca24fea33edab61162ea1e98a20bf8eb7e2b2cf..3c628d1e764fda5a78dd497a81d9c17a2da7d465 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -15,14 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -sc-block-builder = { path = "../../../client/block-builder" } -sc-client-api = { path = "../../../client/api" } -sc-consensus = { path = "../../../client/consensus/common" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -substrate-test-client = { path = "../../client" } -substrate-test-runtime = { path = ".." } +futures = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 0aab6d3f01ca075b85f1c43f879168e2a0e5280d..a13441302e4df119584c056b7e2e62552d7328d6 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -596,7 +596,11 @@ impl_runtime_apis! { } fn do_trace_log() { - log::trace!("Hey I'm runtime"); + log::trace!(target: "test", "Hey I'm runtime"); + + let data = "THIS IS TRACING"; + + tracing::trace!(target: "test", %data, "Hey, I'm tracing"); } fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool { @@ -874,7 +878,7 @@ pub mod storage_key_generator { sp_crypto_hashing::twox_64(x).iter().chain(x.iter()).cloned().collect() } - /// Generate the hashed storage keys from the raw literals. These keys are expected to be be in + /// Generate the hashed storage keys from the raw literals. These keys are expected to be in /// storage with given substrate-test runtime. 
pub fn generate_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec { let mut literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index"]; diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 360e2b7b810d1f40d4ac4884eedb60266e41cc5c..073997da025d2af4863029e38b3db18520fea189 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -parking_lot = "0.12.1" +codec = { workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-runtime = { path = "../../../primitives/runtime" } -substrate-test-runtime-client = { path = "../client" } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index fd35e6b1e1a25ada0e3cbd7012708a66b3f940f5..c0b65d731f7a6121e1f1355f6a877786b3456b71 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -12,15 +12,15 @@ homepage = "https://substrate.io" workspace = true [dependencies] -array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } log = { optional = true, workspace = true } -hash-db = { version = "0.16.0", default-features = false } +hash-db = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -env_logger = "0.11" -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } +array-bytes = { workspace = true, default-features = true } +env_logger = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] debug = ["array-bytes", "log"] diff --git a/substrate/utils/fork-tree/Cargo.toml b/substrate/utils/fork-tree/Cargo.toml index 275f44623bd1455cb34caa544fbfa4a6e140111b..2bb799c603c844acd1baf734a16da05787c9c8bc 100644 --- a/substrate/utils/fork-tree/Cargo.toml +++ b/substrate/utils/fork-tree/Cargo.toml @@ -17,4 +17,4 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 7cfacdc2e5edea443de8742fbe23cf9245872f08..bd1a22affd03b73218c6863ff62c64f58f935a86 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -16,49 +16,49 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" 
-chrono = "0.4" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -comfy-table = { version = "7.1.0", default-features = false } -handlebars = "5.1.0" -Inflector = "0.11.4" -itertools = "0.11" -lazy_static = "1.4.0" -linked-hash-map = "0.5.4" +array-bytes = { workspace = true, default-features = true } +chrono = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +comfy-table = { workspace = true } +handlebars = { workspace = true } +Inflector = { workspace = true } +itertools = { workspace = true } +lazy_static = { workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -rand = { version = "0.8.5", features = ["small_rng"] } -rand_pcg = "0.3.1" +rand = { features = ["small_rng"], workspace = true, default-features = true } +rand_pcg = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -thousands = "0.2.0" -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -sc-block-builder = { path = "../../../client/block-builder" } -sc-chain-spec = { path = "../../../client/chain-spec", default-features = false } -sc-cli = { path = "../../../client/cli", default-features = false } -sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", default-features = false } -sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", default-features = false } -sc-sysinfo = { path = "../../../client/sysinfo" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-database = { path = "../../../primitives/database" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-genesis-builder = { path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-storage = { path = "../../../primitives/storage" } -sp-trie = { path = "../../../primitives/trie" } -sp-io = { path = "../../../primitives/io" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } -gethostname = "0.2.3" +thousands = { workspace = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true } +sc-cli = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-sysinfo = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-externalities = { workspace = true, 
default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } +gethostname = { workspace = true } [features] default = ["rocksdb"] diff --git a/substrate/utils/frame/generate-bags/Cargo.toml b/substrate/utils/frame/generate-bags/Cargo.toml index 2688254bd5ea3e63b1634107d6f15d27c97843e2..934028c9608dc9f6aeee45e6bec48c3cd5e700c9 100644 --- a/substrate/utils/frame/generate-bags/Cargo.toml +++ b/substrate/utils/frame/generate-bags/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # FRAME -frame-support = { path = "../../../frame/support" } -frame-election-provider-support = { path = "../../../frame/election-provider-support" } -frame-system = { path = "../../../frame/system" } -pallet-staking = { path = "../../../frame/staking" } -sp-staking = { path = "../../../primitives/staking" } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } # third party -chrono = { version = "0.4.31" } -num-format = "0.4.3" +chrono = { workspace = true } +num-format = { workspace = true } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index 37d96d7e12b963b7777ac4f21e70e5c419515d37..6dcbca4b97e2457693079afe0de3d5eed767572c 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -13,8 +13,8 @@ publish = false workspace = true [dependencies] -kitchensink-runtime = { path = "../../../../bin/node/runtime" } -generate-bags = { path = ".." 
} +kitchensink-runtime = { workspace = true } +generate-bags = { workspace = true, default-features = true } # third-party -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } diff --git a/substrate/utils/frame/omni-bencher/Cargo.toml b/substrate/utils/frame/omni-bencher/Cargo.toml index 0c2d1a1b32b1f973955795a7d883a3ef72b268aa..f8f44cb4b4387077d9c787512d11727202e0ee82 100644 --- a/substrate/utils/frame/omni-bencher/Cargo.toml +++ b/substrate/utils/frame/omni-bencher/Cargo.toml @@ -11,11 +11,11 @@ license.workspace = true workspace = true [dependencies] -clap = { version = "4.5.2", features = ["derive"] } -cumulus-primitives-proof-size-hostfunction = { path = "../../../../cumulus/primitives/proof-size-hostfunction" } -frame-benchmarking-cli = { path = "../benchmarking-cli", default-features = false } -sc-cli = { path = "../../../client/cli" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } -env_logger = "0.11.2" +clap = { features = ["derive"], workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +env_logger = { workspace = true } log = { workspace = true } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 2911d5eef65902af9a0bec6a679a89db9eb6fb7a..cc075da68c276ddc895585e733bbfc1995d337ac 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -15,24 +15,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["http-client"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } +jsonrpsee = { features = ["http-client"], workspace = true } +codec = { workspace = true, default-features = true } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } -substrate-rpc-client = { path = "../rpc/client" } -futures = "0.3.30" -indicatif = "0.17.7" -spinners = "4.1.0" -tokio-retry = "0.3.0" +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +futures = { workspace = true } +indicatif = { workspace = true } +spinners = { workspace = true } +tokio-retry = { workspace = true } [dev-dependencies] -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } [features] remote-test = [] diff --git 
a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index 0ecb98f31343aa050d9daa8f33445e858631813d..40864085349b090a947743cfa32016e1bb016fc7 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -22,10 +22,7 @@
 use codec::{Compact, Decode, Encode};
 use indicatif::{ProgressBar, ProgressStyle};
-use jsonrpsee::{
-	core::params::ArrayParams,
-	http_client::{HttpClient, HttpClientBuilder},
-};
+use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient};
 use log::*;
 use serde::de::DeserializeOwned;
 use sp_core::{
@@ -190,7 +187,7 @@ impl Transport {
 		} else {
 			uri.clone()
 		};
-		let http_client = HttpClientBuilder::default()
+		let http_client = HttpClient::builder()
 			.max_request_size(u32::MAX)
 			.max_response_size(u32::MAX)
 			.request_timeout(std::time::Duration::from_secs(60 * 5))
@@ -1383,7 +1380,7 @@ mod remote_tests {
 		init_logger();
 		// create an ext with children keys
-		let child_ext = Builder::<Block>::new()
+		let mut child_ext = Builder::<Block>::new()
 			.mode(Mode::Online(OnlineConfig {
 				transport: endpoint().clone().into(),
 				pallets: vec!["Proxy".to_owned()],
@@ -1396,7 +1393,7 @@
 			.unwrap();
 		// create an ext without children keys
-		let ext = Builder::<Block>::new()
+		let mut ext = Builder::<Block>::new()
 			.mode(Mode::Online(OnlineConfig {
 				transport: endpoint().clone().into(),
 				pallets: vec!["Proxy".to_owned()],
diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml
index 501bb95b257949f2b91678d361d210c440057906..1300fae9fff29e6a4b31e85f536913e2cbe6fa05 100644
--- a/substrate/utils/frame/rpc/client/Cargo.toml
+++ b/substrate/utils/frame/rpc/client/Cargo.toml
@@ -15,13 +15,13 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
-jsonrpsee = { version = "0.22", features = ["ws-client"] }
-sc-rpc-api = { path = "../../../../client/rpc-api" }
-async-trait = "0.1.79"
+jsonrpsee = { features = ["ws-client"], workspace = true }
+sc-rpc-api = { workspace = true, default-features = true }
+async-trait = { workspace = true }
 serde = { workspace = true, default-features = true }
-sp-runtime = { path = "../../../../primitives/runtime" }
+sp-runtime = { workspace = true, default-features = true }
 log = { workspace = true, default-features = true }
 [dev-dependencies]
-tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread", "sync"] }
-sp-core = { path = "../../../../primitives/core" }
+tokio = { features = ["macros", "rt-multi-thread", "sync"], workspace = true, default-features = true }
+sp-core = { workspace = true, default-features = true }
diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml
index ee3bf5eb68d716548a4a738f80254cfe811c651f..c8ff281301e36b5d00a941550a89fc24ca87e80f 100644
--- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml
+++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml
@@ -16,20 +16,20 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
+codec = { workspace = true }
 serde = { features = ["derive"], workspace = true, default-features = true }
-sp-core = { path = "../../../../primitives/core" }
-sp-state-machine = { path = "../../../../primitives/state-machine" }
-sp-trie = { path = "../../../../primitives/trie" }
-trie-db = "0.29.0"
+sp-core = {
workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +trie-db = { workspace = true, default-features = true } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } # Substrate Dependencies -sc-client-api = { path = "../../../../client/api" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sp-runtime = { path = "../../../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index bf566f909ecb712d8b00a26717739243adb06129..e20bae730c7eeb98c35ac1105316b30f8c3058c3 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types"], workspace = true } serde = { workspace = true, default-features = true } -frame-support = { path = "../../../../frame/support" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sp-storage = { path = "../../../../primitives/storage" } +frame-support = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } [dev-dependencies] -scale-info = "2.11.1" -jsonrpsee = { version = "0.22", features = ["jsonrpsee-types", "ws-client"] } -tokio = "1.37" -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } -frame-system = { path = "../../../../frame/system" } +scale-info = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types", "ws-client"], workspace = true } +tokio = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 75d24e8e210fcde7822b6c53079a2f958f1d956a..4e866113fd2ee09e0e63c73eecc82c5cdd8191c1 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -codec = { package = "parity-scale-codec", version = "3.6.12" } -docify = "0.2.0" -jsonrpsee = { version = "0.22.5", features = [ +futures = { workspace = true } +codec = { workspace = true, default-features = true } +docify = { workspace = true } +jsonrpsee = { features = [ "client-core", "macros", "server-core", -] } +], workspace = true } log = { workspace = true, default-features = true } -frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sc-transaction-pool-api = { path 
= "../../../../client/transaction-pool/api" } -sp-api = { path = "../../../../primitives/api" } -sp-block-builder = { path = "../../../../primitives/block-builder" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-transaction-pool = { path = "../../../../client/transaction-pool" } -tokio = "1.37" -assert_matches = "1.3.0" -sp-tracing = { path = "../../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } +sc-transaction-pool = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +assert_matches = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index 36527ac6183bb921266023320345afb9f6a89246..9f5516cd8d7419bb7871d2e352b77fbcb83bd377 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -16,12 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hyper = { version = "0.14.16", default-features = false, features = ["http1", "server", "tcp"] } +http-body-util = { workspace = true } +hyper = { features = ["http1", "server"], workspace = true } +hyper-util = { features = ["server-auto", "tokio"], workspace = true } log = { workspace = true, default-features = true } -prometheus = { version = "0.13.0", default-features = false } +prometheus = { workspace = true } thiserror = { workspace = true } -tokio = { version = "1.22.0", features = ["parking_lot"] } +tokio = { features = ["parking_lot"], workspace = true, default-features = true } [dev-dependencies] -hyper = { version = "0.14.16", features = ["client"] } -tokio = { version = "1.22.0", features = ["rt-multi-thread"] } +hyper-util = { features = ["client-legacy", "tokio"], workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index ed1f9137aec4efad97218b2340e49436a05fd577..7a8c65590605265aebc91f02b9f17880d3ab926f 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ b/substrate/utils/prometheus/src/lib.rs @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use hyper::{ - http::StatusCode, - server::Server, - service::{make_service_fn, service_fn}, - Body, Request, Response, -}; +mod sourced; + +use hyper::{http::StatusCode, Request, Response}; +use prometheus::{core::Collector, Encoder, TextEncoder}; +use std::net::SocketAddr; + pub use prometheus::{ self, core::{ @@ -30,13 +30,10 @@ pub use prometheus::{ exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, Registry, }; -use prometheus::{core::Collector, Encoder, TextEncoder}; -use std::net::SocketAddr; - -mod sourced; - pub use sourced::{MetricSource, SourcedCounter, SourcedGauge, SourcedMetric}; +type Body = http_body_util::Full; + pub fn register( metric: T, registry: &Registry, @@ -63,7 +60,10 @@ pub enum Error { PortInUse(SocketAddr), } -async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { +async fn request_metrics( + req: Request, + registry: Registry, +) -> Result, Error> { if req.uri().path() == "/metrics" { let metric_families = registry.gather(); let mut buffer = vec![]; @@ -98,46 +98,49 @@ async fn init_prometheus_with_listener( listener: tokio::net::TcpListener, registry: Registry, ) -> Result<(), Error> { - let listener = hyper::server::conn::AddrIncoming::from_listener(listener)?; - log::info!("ใ€ฝ๏ธ Prometheus exporter started at {}", listener.local_addr()); - - let service = make_service_fn(move |_| { - let registry = registry.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - request_metrics(req, registry.clone()) - })) - } - }); + log::info!(target: "prometheus", "ใ€ฝ๏ธ Prometheus exporter started at {}", listener.local_addr()?); - let (signal, on_exit) = tokio::sync::oneshot::channel::<()>(); - let server = Server::builder(listener).serve(service).with_graceful_shutdown(async { - let _ = on_exit.await; - }); + let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); - let result = server.await.map_err(Into::into); + loop { + let io = match listener.accept().await { + Ok((sock, _)) => hyper_util::rt::TokioIo::new(sock), + Err(e) => { + log::debug!(target: "prometheus", "Error accepting connection: {:?}", e); + continue; + }, + }; - // Gracefully shutdown server, otherwise the server does not stop if it has open connections - let _ = signal.send(()); + let registry = registry.clone(); - result + let conn = server + .serve_connection_with_upgrades( + io, + hyper::service::service_fn(move |req| request_metrics(req, registry.clone())), + ) + .into_owned(); + + tokio::spawn(async move { + if let Err(err) = conn.await { + log::debug!(target: "prometheus", "connection error: {:?}", err); + } + }); + } } #[cfg(test)] mod tests { use super::*; - use hyper::{Client, Uri}; - - #[test] - fn prometheus_works() { - const METRIC_NAME: &str = "test_test_metric_name_test_test"; + use http_body_util::BodyExt; + use hyper::Uri; + use hyper_util::{client::legacy::Client, rt::TokioExecutor}; - let runtime = tokio::runtime::Runtime::new().expect("Creates the runtime"); + const METRIC_NAME: &str = "test_test_metric_name_test_test"; - let listener = runtime - .block_on(tokio::net::TcpListener::bind("127.0.0.1:0")) - .expect("Creates listener"); + #[tokio::test] + async fn prometheus_works() { + let listener = + tokio::net::TcpListener::bind("127.0.0.1:0").await.expect("Creates listener"); let local_addr = listener.local_addr().expect("Returns the local addr"); @@ -148,20 +151,20 @@ mod tests { ) .expect("Registers the test metric"); - 
runtime.spawn(init_prometheus_with_listener(listener, registry)); + tokio::spawn(init_prometheus_with_listener(listener, registry)); - runtime.block_on(async { - let client = Client::new(); + let client = Client::builder(TokioExecutor::new()).build_http::(); - let res = client - .get(Uri::try_from(&format!("http://{}/metrics", local_addr)).expect("Parses URI")) - .await - .expect("Requests metrics"); + let res = client + .get(Uri::try_from(&format!("http://{}/metrics", local_addr)).expect("Parses URI")) + .await + .expect("Requests metrics"); - let buf = hyper::body::to_bytes(res).await.expect("Converts body to bytes"); + assert!(res.status().is_success()); - let body = String::from_utf8(buf.to_vec()).expect("Converts body to String"); - assert!(body.contains(&format!("{} 0", METRIC_NAME))); - }); + let buf = res.into_body().collect().await.expect("Failed to read HTTP body").to_bytes(); + let body = String::from_utf8(buf.to_vec()).expect("Converts body to String"); + + assert!(body.contains(&format!("{} 0", METRIC_NAME))); } } diff --git a/substrate/utils/substrate-bip39/Cargo.toml b/substrate/utils/substrate-bip39/Cargo.toml index a46f81ee24d96d666495ee5cea5ca58b415a9cb0..e5270ea62f4c8d557ac538f0e02327209e7d403f 100644 --- a/substrate/utils/substrate-bip39/Cargo.toml +++ b/substrate/utils/substrate-bip39/Cargo.toml @@ -9,15 +9,15 @@ edition.workspace = true repository.workspace = true [dependencies] -hmac = "0.12.1" -pbkdf2 = { version = "0.12.2", default-features = false } -schnorrkel = { version = "0.11.4", default-features = false } -sha2 = { version = "0.10.7", default-features = false } -zeroize = { version = "1.4.3", default-features = false } +hmac = { workspace = true } +pbkdf2 = { workspace = true } +schnorrkel = { workspace = true } +sha2 = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -bip39 = "2.0.0" -rustc-hex = "2.1.0" +bip39 = { workspace = true } +rustc-hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 090955494f0a7572a08407c2aed26919863b3837..f084400c12e8daf5dc571a16e4df97ea26230a58 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -15,29 +15,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -build-helper = "0.1.1" -cargo_metadata = "0.15.4" -console = "0.15.8" -strum = { version = "0.26.2", features = ["derive"] } -tempfile = "3.1.0" -toml = "0.8.8" -walkdir = "2.4.0" -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -filetime = "0.2.16" -wasm-opt = "0.116" -parity-wasm = "0.45" +build-helper = { workspace = true } +cargo_metadata = { workspace = true } +console = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } +tempfile = { workspace = true } +toml = { workspace = true } +walkdir = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +filetime = { workspace = true } +wasm-opt = { workspace = true } +parity-wasm = { workspace = true } polkavm-linker = { workspace = true } # Dependencies required for the `metadata-hash` feature. 
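The `substrate/utils/prometheus` hunks above replace hyper 0.14's `Server`/`make_service_fn` with a manual accept loop built on hyper 1.x and `hyper-util`. A minimal, self-contained sketch of that pattern follows; the bound address, the trivial "ok" service and the error handling are illustrative only, while the exporter itself serves `/metrics` from a prometheus `Registry` exactly as shown in the diff.

```rust
// Sketch only: the hyper-1.x accept-loop pattern used by the exporter above.
// Assumed dependencies: tokio (macros, rt-multi-thread), hyper (http1, server),
// hyper-util (server-auto, tokio), http-body-util.
use http_body_util::Full;
use hyper::{body::Bytes, service::service_fn, Request, Response};
use hyper_util::rt::{TokioExecutor, TokioIo};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
    println!("listening on {}", listener.local_addr()?);

    // One connection builder is reused for every accepted socket.
    let server = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());

    loop {
        let (sock, _) = listener.accept().await?;
        let io = TokioIo::new(sock);

        let conn = server
            .serve_connection_with_upgrades(
                io,
                // A stand-in for `request_metrics`: always answer 200 "ok".
                service_fn(|_req: Request<hyper::body::Incoming>| async {
                    Ok::<_, std::convert::Infallible>(Response::new(Full::new(
                        Bytes::from_static(b"ok"),
                    )))
                }),
            )
            .into_owned();

        // Every connection is driven on its own task, mirroring the exporter's loop.
        tokio::spawn(async move {
            if let Err(err) = conn.await {
                eprintln!("connection error: {err:?}");
            }
        });
    }
}
```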
-merkleized-metadata = { version = "0.1.0", optional = true } -sc-executor = { path = "../../client/executor", optional = true } -sp-core = { path = "../../primitives/core", optional = true } -sp-io = { path = "../../primitives/io", optional = true } -sp-version = { path = "../../primitives/version", optional = true } -frame-metadata = { version = "16.0.0", features = ["current"], optional = true } -codec = { package = "parity-scale-codec", version = "3.1.5", optional = true } -array-bytes = { version = "6.1", optional = true } -sp-tracing = { path = "../../primitives/tracing", optional = true } +merkleized-metadata = { optional = true, workspace = true } +sc-executor = { optional = true, workspace = true, default-features = true } +sp-core = { optional = true, workspace = true, default-features = true } +sp-io = { optional = true, workspace = true, default-features = true } +sp-version = { optional = true, workspace = true, default-features = true } +frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true } +codec = { optional = true, workspace = true, default-features = true } +array-bytes = { optional = true, workspace = true, default-features = true } +sp-tracing = { optional = true, workspace = true, default-features = true } [features] # Enable support for generating the metadata hash. diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml index ca00cb84284524fc7d43d3b95f9cf4bb795ec506..ba96e139bcf194694aea029ca67825f948f30624 100644 --- a/templates/minimal/Cargo.toml +++ b/templates/minimal/Cargo.toml @@ -10,13 +10,13 @@ edition.workspace = true publish = false [dependencies] -minimal-template-node = { path = "./node" } -minimal-template-runtime = { path = "./runtime" } -pallet-minimal-template = { path = "./pallets/template" } -polkadot-sdk-docs = { path = "../../docs/sdk" } +minimal-template-node = { workspace = true } +minimal-template-runtime = { workspace = true } +pallet-minimal-template = { workspace = true, default-features = true } +polkadot-sdk-docs = { workspace = true } -frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame" } +frame = { workspace = true, default-features = true } # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.7" +docify = { workspace = true } diff --git a/templates/minimal/README.md b/templates/minimal/README.md index f00bfd4d48772ba7df8a035df757eb59d23626ca..b556a4536089004205f497ccbda871b3e811de7c 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -14,7 +14,7 @@ * ๐Ÿค This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node. -* ๐Ÿ”ง Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +* ๐Ÿ”ง Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). * ๐Ÿ‘ค The template has no consensus configured - it is best for experimenting with a single node network. 
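The `wasm-builder` manifest above gates `merkleized-metadata`, `frame-metadata`, `sc-executor` and friends behind an optional `metadata-hash` feature. A hedged sketch of how a runtime's `build.rs` can opt into it; the builder methods `init_with_defaults`, `enable_metadata_hash` and `build_using_defaults` are assumed from the wasm-builder API of this era, and the `"UNIT"` symbol, 12 decimals and the `metadata-hash` feature name are placeholders.

```rust
// build.rs sketch for a runtime crate using substrate-wasm-builder.
fn main() {
    // With the metadata-hash feature on, the builder also computes the merkleized
    // metadata hash and embeds it, so a `CheckMetadataHash`-style extension can
    // verify signed extrinsics against it.
    #[cfg(all(feature = "std", feature = "metadata-hash"))]
    substrate_wasm_builder::WasmBuilder::init_with_defaults()
        .enable_metadata_hash("UNIT", 12)
        .build();

    // Plain Wasm build otherwise.
    #[cfg(all(feature = "std", not(feature = "metadata-hash")))]
    substrate_wasm_builder::WasmBuilder::build_using_defaults();
}
```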
@@ -42,7 +42,7 @@ packages required to compile this template - please take note of the Rust compil ๐Ÿ”จ Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package minimal-template-node --release ``` ๐Ÿณ Alternatively, build the docker image: @@ -65,8 +65,8 @@ docker run --rm polkadot-sdk-minimal-template --dev Development chains: * ๐Ÿงน Do not persist the state. -* ๐Ÿ’ฐ Are preconfigured with a genesis state that includes several prefunded development accounts. -* ๐Ÿง‘โ€โš–๏ธ Development accounts are used as `sudo` accounts. +* ๐Ÿ’ฐ Are pre-configured with a genesis state that includes several pre-funded development accounts. +* ๐Ÿง‘โ€โš–๏ธ One development account (`ALICE`) is used as the `sudo` account. ### Connect with the Polkadot-JS Apps Front-End diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index a10364a2854a9acfa9f7bf69ee885bc155facb79..70b24c19f8e7ce1b02f4570e5f34b974d7be0d8a 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -14,47 +14,47 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = "0.2.0" -clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.30", features = ["thread-pool"] } -futures-timer = "3.0.1" -jsonrpsee = { version = "0.22", features = ["server"] } +docify = { workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { features = ["thread-pool"], workspace = true } +futures-timer = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } serde_json = { workspace = true, default-features = true } -sc-cli = { path = "../../../substrate/client/cli" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-manual-seal = { path = "../../../substrate/client/consensus/manual-seal" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-client-api = { path = "../../../substrate/client/api" } - -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } - -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } +sc-cli = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true,
default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } + +sp-timestamp = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } + +substrate-frame-rpc-system = { workspace = true, default-features = true } # Once the native runtime is gone, there should be little to no dependency on FRAME here, and # certainly no dependency on the runtime. -frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = [ +frame = { features = [ "experimental", "runtime", -] } -runtime = { package = "minimal-template-runtime", path = "../runtime" } +], workspace = true, default-features = true } +minimal-template-runtime = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs index 7a3475bb167334e05f570ad4b46e8d471ec5a9a8..5b53b0f80ac00ec8315f449c62283da195ab89e5 100644 --- a/templates/minimal/node/src/chain_spec.rs +++ b/templates/minimal/node/src/chain_spec.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; +use minimal_template_runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; use sc_service::{ChainType, Properties}; use serde_json::{json, Value}; use sp_keyring::AccountKeyring; @@ -43,7 +43,7 @@ pub fn development_config() -> Result { /// Configure initial storage state for FRAME pallets. 
fn testnet_genesis() -> Value { use frame::traits::Get; - use runtime::interface::{Balance, MinimumBalance}; + use minimal_template_runtime::interface::{Balance, MinimumBalance}; let endowment = >::get().max(1) * 1000; let balances = AccountKeyring::iter() .map(|a| (a.to_account_id(), endowment)) diff --git a/templates/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs index 432add922a7b585bba9be071a0a82dcd34e02dd7..c17f9bc55927ba46e0bb9ecad35b344229aff3c7 100644 --- a/templates/minimal/node/src/command.rs +++ b/templates/minimal/node/src/command.rs @@ -114,7 +114,9 @@ pub fn run() -> sc_cli::Result<()> { }, Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(&config)) + runner.sync_run(|config| { + cmd.run::(&config) + }) }, None => { let runner = cli.create_runner(&cli.run)?; diff --git a/templates/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs index 4b283bb2a66f4e18a6dffdbd2d387be93f8ff6a7..451e7b21dd0c1ce34ae5fa51a10271e699b56e88 100644 --- a/templates/minimal/node/src/rpc.rs +++ b/templates/minimal/node/src/rpc.rs @@ -23,7 +23,7 @@ #![warn(missing_docs)] use jsonrpsee::RpcModule; -use runtime::interface::{AccountId, Nonce, OpaqueBlock}; +use minimal_template_runtime::interface::{AccountId, Nonce, OpaqueBlock}; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use std::sync::Arc; diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 5a92627621bfcba71667ed198786a8f5e5aa9b5a..71b1ef65b6cad91d33039b13130abdcef414d5bd 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -16,7 +16,7 @@ // limitations under the License. 
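The `testnet_genesis` change above is easier to follow with the whole function in view. A sketch of how the minimal template assembles its dev genesis patch, reusing the imports visible in `chain_spec.rs`; the fully qualified `<MinimumBalance as Get<Balance>>::get()` call, the `collect`, and the exact shape of the `json!` patch are reconstructions, since the generic arguments are not legible in the diff text.

```rust
// Sketch of the dev genesis patch built in templates/minimal/node/src/chain_spec.rs.
use minimal_template_runtime::{
    interface::{Balance, MinimumBalance},
    BalancesConfig, SudoConfig,
};
use serde_json::{json, Value};
use sp_keyring::AccountKeyring;

fn testnet_genesis_sketch() -> Value {
    use frame::traits::Get;

    // Endow every well-known dev account with 1000x the minimum balance.
    let endowment = <MinimumBalance as Get<Balance>>::get().max(1) * 1000;
    let balances = AccountKeyring::iter()
        .map(|a| (a.to_account_id(), endowment))
        .collect::<Vec<_>>();

    // Alice doubles as the sudo key on the development chain.
    json!({
        "balances": BalancesConfig { balances },
        "sudo": SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) },
    })
}
```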
use futures::FutureExt; -use runtime::{self, interface::OpaqueBlock as Block, RuntimeApi}; +use minimal_template_runtime::{interface::OpaqueBlock as Block, RuntimeApi}; use sc_client_api::backend::Backend; use sc_executor::WasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; @@ -34,8 +34,10 @@ type HostFunctions = #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = sp_io::SubstrateHostFunctions; +#[docify::export] pub(crate) type FullClient = sc_service::TFullClient>; + type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index f0abe3c6942de634e603f939aaa5ad8c19bcd20d..9d231fe7d7d45d3c4d1f35bee98d2bf7a57203a7 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -13,16 +13,16 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -], default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } -frame = { package = "polkadot-sdk-frame", path = "../../../../substrate/frame", default-features = false, features = [ +], workspace = true } +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } [features] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs index 713f014bbe61fc9fa7df5019c00afddebf02bbc6..92b90ad4412b0263eac5fc26bea7d835f932030f 100644 --- a/templates/minimal/pallets/template/src/lib.rs +++ b/templates/minimal/pallets/template/src/lib.rs @@ -1,4 +1,7 @@ //! A shell pallet built with [`frame`]. +//! +//! To get started with this pallet, try implementing the guide in +//! #![cfg_attr(not(feature = "std"), no_std)] diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 42ea49ff404628ba1d2768302dc252c652a0ddaa..5d3cf8492e5227f2af4945d328acec216f1a5752 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -10,31 +10,31 @@ edition.workspace = true publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } +codec = { workspace = true } +scale-info = { workspace = true } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
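`#[docify::export]` shows up here on `FullClient` and again later (for example `lookahead_collator` and `aura_config`). The attribute only marks an item so that a docs crate can splice its source into rustdoc. A small illustrative sketch of the pattern; the file path, item and module names are examples, not taken from this diff.

```rust
// In a library crate, e.g. src/service.rs: mark an item for embedding.
#[docify::export]
pub(crate) type FullClient = ();

// In a docs crate: embed the exported snippet into rustdoc as a code block.
// The path is resolved at compile time relative to the embedding crate.
#[doc = docify::embed!("./src/service.rs", FullClient)]
pub mod template_structure {}
```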
-frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", default-features = false, features = [ +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } # pallets that we want to use -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } +sp-genesis-builder = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } # local pallet templates -pallet-minimal-template = { path = "../pallets/template", default-features = false } +pallet-minimal-template = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index d2debbf5689fdf41b8436fb33eac52db5f573a3e..8c7867f4cc8cd1ec8b8435ec3c038e45da1a5700 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -99,27 +99,27 @@ mod runtime { /// Mandatory system pallet that should always be included in a FRAME runtime. #[runtime::pallet_index(0)] - pub type System = frame_system; + pub type System = frame_system::Pallet; /// Provides a way for consensus systems to set and check the onchain time. #[runtime::pallet_index(1)] - pub type Timestamp = pallet_timestamp; + pub type Timestamp = pallet_timestamp::Pallet; /// Provides the ability to keep track of balances. #[runtime::pallet_index(2)] - pub type Balances = pallet_balances; + pub type Balances = pallet_balances::Pallet; /// Provides a way to execute privileged functions. #[runtime::pallet_index(3)] - pub type Sudo = pallet_sudo; + pub type Sudo = pallet_sudo::Pallet; /// Provides the ability to charge for extrinsic execution. #[runtime::pallet_index(4)] - pub type TransactionPayment = pallet_transaction_payment; + pub type TransactionPayment = pallet_transaction_payment::Pallet; /// A minimal pallet template. #[runtime::pallet_index(5)] - pub type Template = pallet_minimal_template; + pub type Template = pallet_minimal_template::Pallet; } parameter_types! 
{ diff --git a/templates/parachain/README.md b/templates/parachain/README.md index a6ac91799b77728c4116c3e98554d5dca66e9929..802d8586b39e091b67c45f86226743552d4e0a70 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -16,7 +16,7 @@ * โ˜๏ธ It is based on the [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework. -* ๐Ÿ”ง Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +* ๐Ÿ”ง Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). * ๐Ÿ‘‰ Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains) @@ -44,7 +44,7 @@ packages required to compile this template - please take note of the Rust compil ๐Ÿ”จ Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package parachain-template-node --release ``` ๐Ÿณ Alternatively, build the docker image: @@ -70,7 +70,7 @@ and `zombienet` - into `PATH` like so: export PATH="./target/release/:$PATH" ``` -This way, we can conveniently use them un the following steps. +This way, we can conveniently use them in the following steps. ๐Ÿ‘ฅ The following command starts a local development chain, with a single relay chain node and a single parachain collator: diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 1737c6a9df75e41b6d8252d8966c1464bfdc5bfb..7cf1f1fddc7b31e4adbb69f7e54f35cbcad28399 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -14,69 +14,69 @@ build = "build.rs" # name = "parachain-template-node" [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +codec = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.28" +jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } serde_json = { workspace = true, default-features = true } -docify = "0.2.8" +docify = { workspace = true } # Local -parachain-template-runtime = { path = "../runtime" } +parachain-template-runtime = { workspace = true } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } -pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-rpc = { path = "../../../substrate/client/rpc" } -sc-service = { path = 
"../../../substrate/client/service" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +frame-benchmarking = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Polkadot -polkadot-cli = { path = "../../../polkadot/cli", features = ["rococo-native"] } -polkadot-primitives = { path = "../../../polkadot/primitives" } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +polkadot-cli = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +xcm = { workspace = true } # Cumulus 
-cumulus-client-cli = { path = "../../../cumulus/client/cli" } -cumulus-client-collator = { path = "../../../cumulus/client/collator" } -cumulus-client-consensus-aura = { path = "../../../cumulus/client/consensus/aura" } -cumulus-client-consensus-common = { path = "../../../cumulus/client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../../cumulus/client/consensus/proposer" } -cumulus-client-service = { path = "../../../cumulus/client/service" } -cumulus-primitives-core = { path = "../../../cumulus/primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../../cumulus/primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../../../cumulus/client/relay-chain-interface" } -color-print = "0.3.4" +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +color-print = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/parachain/node/README.md b/templates/parachain/node/README.md index 350272c7b6efe1e53b4ed13296a00c89a8cf0639..ad309d4015aab11942340464028ede2c84982ebc 100644 --- a/templates/parachain/node/README.md +++ b/templates/parachain/node/README.md @@ -7,7 +7,7 @@ โš™๏ธ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain. -๐Ÿ‘‰ Learn more about the architecture, and a difference between a node and a runtime +๐Ÿ‘‰ Learn more about the architecture, and the difference between a node and a runtime [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html). 
๐Ÿ‘‡ Here are the most important files in this node template: diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 587dd19faf3e8af6e8a8296333d61393e6fd52ac..3e7d4de105535af1c0d3b1188caacf9a162cedf9 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -12,6 +12,7 @@ use parachain_template_runtime::{ // Cumulus Imports use cumulus_client_collator::service::CollatorService; +#[docify::export(lookahead_collator)] use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; @@ -20,6 +21,7 @@ use cumulus_client_service::{ BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, StartRelayChainTasksParams, }; +#[docify::export(cumulus_primitives)] use cumulus_primitives_core::{ relay_chain::{CollatorPair, ValidationCode}, ParaId, @@ -33,7 +35,6 @@ use sc_client_api::Backend; use sc_consensus::ImportQueue; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; -use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -170,7 +171,6 @@ fn start_consensus( task_manager: &TaskManager, relay_chain_interface: Arc, transaction_pool: Arc>, - sync_oracle: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -204,7 +204,6 @@ fn start_consensus( code_hash_provider: move |block_hash| { client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, - sync_oracle, keystore, collator_key, para_id, @@ -215,11 +214,9 @@ fn start_consensus( authoring_duration: Duration::from_millis(2000), reinitialize: false, }; - - let fut = - aura::run::( - params, - ); + let fut = aura::run::( + params, + ); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -396,7 +393,6 @@ pub async fn start_parachain_node( &task_manager, relay_chain_interface, transaction_pool, - sync_service, params.keystore_container.keystore(), relay_chain_slot_duration, para_id, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 6c549c2c4a9b64f7bffd07f27bde04bcdb32442d..dde863101372e9b30a6aff3062618509b1743520 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -13,22 +13,24 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # frame deps -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +# 
primitive deps +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -40,12 +42,12 @@ runtime-benchmarks = [ ] std = [ "codec/std", + "scale-info/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", + "sp-runtime/std", ] try-runtime = [ diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index d1a9554aed6dc0533f914e18661942ed56bcd514..5acad6e60decc5eb3e80833712fadfc0d42c1857 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -1,34 +1,33 @@ //! Benchmarking setup for pallet-template #![cfg(feature = "runtime-benchmarks")] -use super::*; -#[allow(unused)] -use crate::Pallet as Template; +use super::*; use frame_benchmarking::v2::*; -use frame_system::RawOrigin; #[benchmarks] mod benchmarks { use super::*; + #[cfg(test)] + use crate::pallet::Pallet as Template; + use frame_system::RawOrigin; #[benchmark] fn do_something() { - let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] - do_something(RawOrigin::Signed(caller), value); + do_something(RawOrigin::Signed(caller), 100); - assert_eq!(Something::::get(), Some(value)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(100u32.into())); } #[benchmark] fn cause_error() { - Something::::put(100u32); + Something::::put(CompositeStruct { block_number: 100u32.into() }); let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] cause_error(RawOrigin::Signed(caller)); - assert_eq!(Something::::get(), Some(101u32)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(101u32.into())); } impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/templates/parachain/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs index 11587d1df426f485139eab9e333b292768c6c571..6bfb98972aedfc324edc3f064c7cf2d0d28d6a67 100644 --- a/templates/parachain/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -1,8 +1,52 @@ +//! # Template Pallet +//! +//! A pallet with minimal functionality to help developers understand the essential components of +//! writing a FRAME pallet. It is typically used in beginner tutorials or in Polkadot SDK template +//! as a starting point for creating a new pallet and **not meant to be used in production**. +//! +//! ## Overview +//! +//! This template pallet contains basic examples of: +//! - declaring a storage item that stores a single block-number +//! - declaring and using events +//! - declaring and using errors +//! - a dispatchable function that allows a user to set a new value to storage and emits an event +//! upon success +//! - another dispatchable function that causes a custom error to be thrown +//! +//! Each pallet section is annotated with an attribute using the `#[pallet::...]` procedural macro. +//! This macro generates the necessary code for a pallet to be aggregated into a FRAME runtime. +//! +//! To get started with pallet development, consider using this tutorial: +//! +//! +//! +//! 
And reading the main documentation of the `frame` crate: +//! +//! +//! +//! And looking at the frame [`kitchen-sink`](https://paritytech.github.io/polkadot-sdk/master/pallet_example_kitchensink/index.html) +//! pallet, a showcase of all pallet macros. +//! +//! ### Pallet Sections +//! +//! The pallet sections in this template are: +//! +//! - A **configuration trait** that defines the types and parameters which the pallet depends on +//! (denoted by the `#[pallet::config]` attribute). See: [`Config`]. +//! - A **means to store pallet-specific data** (denoted by the `#[pallet::storage]` attribute). +//! See: [`storage_types`]. +//! - A **declaration of the events** this pallet emits (denoted by the `#[pallet::event]` +//! attribute). See: [`Event`]. +//! - A **declaration of the errors** that this pallet can throw (denoted by the `#[pallet::error]` +//! attribute). See: [`Error`]. +//! - A **set of dispatchable functions** that define the pallet's functionality (denoted by the +//! `#[pallet::call]` attribute). See: [`dispatchables`]. +//! +//! Run `cargo doc --package pallet-template --open` to view this pallet's documentation. + #![cfg_attr(not(feature = "std"), no_std)] -/// Edit this file to define custom logic or remove it if it is not needed. -/// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// pub use pallet::*; #[cfg(test)] @@ -16,16 +60,25 @@ pub mod weights; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +// +// +// +// To see a full list of `pallet` macros and their use cases, see: +// +// #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; + use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*, DefaultNoBound}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::{CheckedAdd, One}; /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. type WeightInfo: crate::weights::WeightInfo; } @@ -33,24 +86,34 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + /// A struct to store a single block-number. Has all the right derives to store it in storage. + /// + #[derive( + Encode, Decode, MaxEncodedLen, TypeInfo, CloneNoBound, PartialEqNoBound, DefaultNoBound, + )] + #[scale_info(skip_type_params(T))] + pub struct CompositeStruct { + /// A block number. + pub(crate) block_number: BlockNumberFor, + } + + /// The pallet's storage items. + /// + /// #[pallet::storage] - // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items - pub type Something = StorageValue<_, u32>; + pub type Something = StorageValue<_, CompositeStruct>; - // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/v3/runtime/events-and-errors + /// Pallets use events to inform users when important changes are made. + /// #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Event documentation should end with an array that provides descriptive names for event - /// parameters. 
[something, who] - SomethingStored(u32, T::AccountId), + /// We usually use passive tense for events. + SomethingStored { block_number: BlockNumberFor, who: T::AccountId }, } - // Errors inform users that something went wrong. + /// Errors inform users that something went wrong. + /// #[pallet::error] pub enum Error { /// Error names should be descriptive. @@ -62,27 +125,33 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet {} - // Dispatchable functions allows users to interact with the pallet and invoke state changes. - // These functions materialize as "extrinsics", which are often compared to transactions. - // Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// Dispatchable functions allows users to interact with the pallet and invoke state changes. + /// These functions materialize as "extrinsics", which are often compared to transactions. + /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// #[pallet::call] impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. #[pallet::call_index(0)] #[pallet::weight(Weight::from_parts(10_000, 0) + T::DbWeight::get().writes(1))] - pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { + pub fn do_something(origin: OriginFor, bn: u32) -> DispatchResultWithPostInfo { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. - // https://docs.substrate.io/v3/runtime/origins + // let who = ensure_signed(origin)?; + // Convert the u32 into a block number. This is possible because the set of trait bounds + // defined in [`frame_system::Config::BlockNumber`]. + let block_number: BlockNumberFor = bn.into(); + // Update storage. - >::put(something); + >::put(CompositeStruct { block_number }); // Emit an event. - Self::deposit_event(Event::SomethingStored(something, who)); - // Return a successful DispatchResultWithPostInfo + Self::deposit_event(Event::SomethingStored { block_number, who }); + + // Return a successful [`DispatchResultWithPostInfo`] or [`DispatchResult`]. Ok(().into()) } @@ -96,11 +165,19 @@ pub mod pallet { match >::get() { // Return an error if the value has not been set. None => Err(Error::::NoneValue)?, - Some(old) => { + Some(mut old) => { // Increment the value read from storage; will error in the event of overflow. - let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; + old.block_number = old + .block_number + .checked_add(&One::one()) + // ^^ equivalent is to: + // .checked_add(&1u32.into()) + // both of which build a `One` instance for the type `BlockNumber`. + .ok_or(Error::::StorageOverflow)?; // Update the value in storage with the incremented result. - >::put(new); + >::put(old); + // Explore how you can rewrite this using + // [`frame_support::storage::StorageValue::mutate`]. 
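The new comment on `cause_error` points at `frame_support::storage::StorageValue::mutate` as an alternative. A hedged sketch of that rewrite, assuming it sits inside the same `#[pallet::call]` impl and reuses the `Something`, `CompositeStruct` and `Error` items defined above; the call index and weight are placeholders, and `try_mutate` is chosen so the storage write only happens when the closure succeeds.

```rust
/// Same behaviour as `cause_error`, expressed with a single `try_mutate`.
#[pallet::call_index(2)]
#[pallet::weight(Weight::from_parts(10_000, 0) + T::DbWeight::get().reads_writes(1, 1))]
pub fn cause_error_with_mutate(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
    let _who = ensure_signed(origin)?;

    Something::<T>::try_mutate(|maybe_value| -> DispatchResultWithPostInfo {
        // Fail (and leave storage untouched) if nothing was stored yet.
        let value = maybe_value.as_mut().ok_or(Error::<T>::NoneValue)?;

        // Increment the stored block number, guarding against overflow.
        value.block_number = value
            .block_number
            .checked_add(&One::one())
            .ok_or(Error::<T>::StorageOverflow)?;

        Ok(().into())
    })
}
```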
Ok(().into()) }, } diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index ebb0598df97bf215321583a8cb0821ecddce1262..46e3117596f599ca923425dd72be7c01421b4ffa 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -1,25 +1,36 @@ -use frame_support::{derive_impl, parameter_types}; -use frame_system as system; -use sp_runtime::BuildStorage; - -type Block = frame_system::mocking::MockBlock; +use frame_support::{derive_impl, weights::constants::RocksDbWeight}; +use frame_system::{mocking::MockBlock, GenesisConfig}; +use sp_runtime::{traits::ConstU64, BuildStorage}; // Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - TemplateModule: crate::{Pallet, Call, Storage, Event}, - } -); +#[frame_support::runtime] +mod test_runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Test; -parameter_types! { - pub const SS58Prefix: u8 = 42; + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type TemplateModule = crate; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl system::Config for Test { - type Block = Block; +impl frame_system::Config for Test { + type Nonce = u64; + type Block = MockBlock; + type BlockHashCount = ConstU64<250>; + type DbWeight = RocksDbWeight; } impl crate::Config for Test { @@ -29,5 +40,5 @@ impl crate::Config for Test { // Build genesis storage according to the mock runtime. pub fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::::default().build_storage().unwrap().into() + GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/templates/parachain/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs index 9ad3076be2cc9927063e1b50c293cafadf8f3361..a4a41af63c2e9df1d438e22f13da3ea01fcf7a7b 100644 --- a/templates/parachain/pallets/template/src/tests.rs +++ b/templates/parachain/pallets/template/src/tests.rs @@ -7,7 +7,7 @@ fn it_works_for_default_value() { // Dispatch a signed extrinsic. assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. - assert_eq!(Something::::get(), Some(42)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(42)); }); } diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs index 7c42936e09f292de831d28460a3bc39436c3323f..5bfe28e8b71e36d7ee06127b3c1f769617e96494 100644 --- a/templates/parachain/pallets/template/src/weights.rs +++ b/templates/parachain/pallets/template/src/weights.rs @@ -4,7 +4,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! HOSTNAME: `_`, CPU: `` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 059c793679694bd9c11b10384383f6e20301e5c5..939fa245d2a0c1b6e03ecd0541a2b9aa63af75bd 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -13,80 +13,79 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } -docify = "0.2.8" +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } +docify = { workspace = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -hex-literal = { version = "0.4.1", optional = true } +], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } -smallvec = "1.11.0" -docify = "0.2.8" +], workspace = true } +smallvec = { workspace = true, default-features = true } +docify = { workspace = true } # Local -pallet-parachain-template = { path = "../pallets/template", default-features = false } +pallet-parachain-template = { workspace = true } # Substrate / FRAME -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } # FRAME Pallets -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features 
= false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # Substrate Primitives -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } 
-cumulus-primitives-aura = { path = "../../../cumulus/primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../cumulus/pallets/collator-selection", default-features = false } -parachains-common = { path = "../../../cumulus/parachains/common", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../cumulus/parachains/pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachains-common = { workspace = true } +parachain-info = { workspace = true } [features] default = ["std"] @@ -136,7 +135,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index 107956ded410464757c767b81d1ac88da9aba1a4..f5d5d3e63027be2d7cdb20fb24bb4c6dcf3c25c9 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -24,6 +24,7 @@ // For more information, please refer to // External crates imports +use alloc::vec::Vec; use frame_support::{ genesis_builder_helper::{build_state, get_preset}, weights::Weight, @@ -37,7 +38,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::Vec; use sp_version::RuntimeVersion; // Local module imports @@ -47,10 +47,26 @@ use super::{ SLOT_DURATION, VERSION, }; +// we move some impls outside so we can easily use them with `docify`. +impl Runtime { + #[docify::export] + fn impl_slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + } + + #[docify::export] + fn impl_can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } +} + impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + Runtime::impl_slot_duration() } fn authorities() -> Vec { @@ -63,7 +79,7 @@ impl_runtime_apis! { included_hash: ::Hash, slot: cumulus_primitives_aura::Slot, ) -> bool { - ConsensusHook::can_build_upon(included_hash, slot) + Runtime::impl_can_build_upon(included_hash, slot) } } @@ -90,7 +106,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } @@ -248,7 +264,7 @@ impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 63e6a67a90638266820bce44e8aee75a544681bc..204e74a11e6a195bc2ef05302ff2695034659ecd 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -32,7 +32,9 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin, VariantCountOf, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -154,8 +156,8 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -263,6 +265,7 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } +#[docify::export(aura_config)] impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 987b88af8444dac73fa1b8972e78973f35ea869d..012ad1d097bfaccbb5968662641888616da274b9 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -7,9 +7,13 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod apis; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarks; mod configs; mod weights; +extern crate alloc; +use alloc::{boxed::Box, vec::Vec}; use smallvec::smallvec; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, @@ -17,17 +21,13 @@ use sp_runtime::{ MultiSignature, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::{ - construct_runtime, - weights::{ - constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, +use frame_support::weights::{ + constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, + WeightToFeePolynomial, }; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; @@ -169,17 +169,21 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -/// This determines the average expected block time that we are targeting. -/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. -/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked -/// up by `pallet_aura` to implement `fn slot_duration()`. -/// -/// Change this to adjust the block time. -pub const MILLISECS_PER_BLOCK: u64 = 6000; - -// NOTE: Currently it is not possible to change the slot duration after the chain has started. -// Attempting to do so will brick block production. 
-pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +#[docify::export] +mod block_times { + /// This determines the average expected block time that we are targeting. Blocks will be + /// produced at a minimum duration defined by `SLOT_DURATION`. `SLOT_DURATION` is picked up by + /// `pallet_timestamp` which is in turn picked up by `pallet_aura` to implement `fn + /// slot_duration()`. + /// + /// Change this to adjust the block time. + pub const MILLISECS_PER_BLOCK: u64 = 6000; + + // NOTE: Currently it is not possible to change the slot duration after the chain has started. + // Attempting to do so will brick block production. + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +} +pub use block_times::*; // Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); @@ -202,21 +206,27 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5); /// `Operational` extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +#[docify::export(max_block_weight)] /// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included -/// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; -/// How many parachain blocks are processed by the relay chain per parent. Limits the -/// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; -/// Relay chain slot duration, in milliseconds. -const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +#[docify::export] +mod async_backing_params { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub(crate) const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub(crate) const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub(crate) const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} +pub(crate) use async_backing_params::*; +#[docify::export] /// Aura consensus hook type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, @@ -232,43 +242,70 @@ pub fn native_version() -> NativeVersion { } // Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime { - // System support stuff. - System: frame_system = 0, - ParachainSystem: cumulus_pallet_parachain_system = 1, - Timestamp: pallet_timestamp = 2, - ParachainInfo: parachain_info = 3, - - // Monetary stuff. - Balances: pallet_balances = 10, - TransactionPayment: pallet_transaction_payment = 11, - - // Governance - Sudo: pallet_sudo = 15, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship = 20, - CollatorSelection: pallet_collator_selection = 21, - Session: pallet_session = 22, - Aura: pallet_aura = 23, - AuraExt: cumulus_pallet_aura_ext = 24, - - // XCM helpers. 
- XcmpQueue: cumulus_pallet_xcmp_queue = 30, - PolkadotXcm: pallet_xcm = 31, - CumulusXcm: cumulus_pallet_xcm = 32, - MessageQueue: pallet_message_queue = 33, - - // Template - TemplatePallet: pallet_parachain_template = 50, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type ParachainSystem = cumulus_pallet_parachain_system; + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type ParachainInfo = parachain_info; + + // Monetary stuff. + #[runtime::pallet_index(10)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(11)] + pub type TransactionPayment = pallet_transaction_payment; + + // Governance + #[runtime::pallet_index(15)] + pub type Sudo = pallet_sudo; + + // Collator support. The order of these 4 are important and shall not change. + #[runtime::pallet_index(20)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(21)] + pub type CollatorSelection = pallet_collator_selection; + #[runtime::pallet_index(22)] + pub type Session = pallet_session; + #[runtime::pallet_index(23)] + pub type Aura = pallet_aura; + #[runtime::pallet_index(24)] + pub type AuraExt = cumulus_pallet_aura_ext; + + // XCM helpers. + #[runtime::pallet_index(30)] + pub type XcmpQueue = cumulus_pallet_xcmp_queue; + #[runtime::pallet_index(31)] + pub type PolkadotXcm = pallet_xcm; + #[runtime::pallet_index(32)] + pub type CumulusXcm = cumulus_pallet_xcm; + #[runtime::pallet_index(33)] + pub type MessageQueue = pallet_message_queue; + + // Template + #[runtime::pallet_index(50)] + pub type TemplatePallet = pallet_parachain_template; +} +#[docify::export(register_validate_block)] cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarks; diff --git a/templates/solochain/README.md b/templates/solochain/README.md index 2e3b1146a8fdebbdb4488af29831662a951733c9..c5dc5db7f3b51c65fda0a6f74bb56a424e4e384e 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -28,7 +28,7 @@ installation](#alternatives-installations) options. 
Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package solochain-template-node --release ``` ### Embedded Docs @@ -37,7 +37,7 @@ After you build the project, you can use the following command to explore its parameters and subcommands: ```sh -./target/release/node-template -h +./target/release/solochain-template-node -h ``` You can generate and view the [Rust @@ -54,19 +54,19 @@ The following command starts a single-node development chain that doesn't persist state: ```sh -./target/release/node-template --dev +./target/release/solochain-template-node --dev ``` To purge the development chain's state, run the following command: ```sh -./target/release/node-template purge-chain --dev +./target/release/solochain-template-node purge-chain --dev ``` To start the development chain with detailed logging, run the following command: ```sh -RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev +RUST_BACKTRACE=1 ./target/release/solochain-template-node -ldebug --dev ``` Development chains: @@ -75,7 +75,7 @@ Development chains: - Use the **Alice** and **Bob** accounts as default validator authorities. - Use the **Alice** account as the default `sudo` account. - Are preconfigured with a genesis state (`/node/src/chain_spec.rs`) that - includes several prefunded development accounts. + includes several pre-funded development accounts. To persist chain state between runs, specify a base path by running a command @@ -86,7 +86,7 @@ similar to the following: $ mkdir my-chain-state // Use of that folder to store the chain state -$ ./target/release/node-template --dev --base-path ./my-chain-state/ +$ ./target/release/solochain-template-node --dev --base-path ./my-chain-state/ // Check the folder structure created inside the base path after running the chain $ ls ./my-chain-state @@ -142,7 +142,7 @@ following: file that defines a Substrate chain's initial (genesis) state. Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. Take note of the - `development_config` and `testnet_genesis` functions,. These functions are + `development_config` and `testnet_genesis` functions. These functions are used to define the genesis state for the local development chain configuration. 
These functions identify some [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 4e8b81840900d3365f569ce96ecb642eb560d6d9..068284c6c3ea7c63e57a9c9039e091488e31a6a8 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -15,54 +15,54 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.30", features = ["thread-pool"] } +clap = { features = ["derive"], workspace = true } +futures = { features = ["thread-pool"], workspace = true } serde_json = { workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } +jsonrpsee = { features = ["server"], workspace = true } # substrate client -sc-cli = { path = "../../../substrate/client/cli" } -sp-core = { path = "../../../substrate/primitives/core" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } # substrate primitives -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = 
"../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } # frame and pallets -frame-system = { path = "../../../substrate/frame/system" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } +frame-system = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } # These dependencies are used for runtime benchmarking -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } +frame-benchmarking-cli = { workspace = true, default-features = true } # Local Dependencies -solochain-template-runtime = { path = "../runtime" } +solochain-template-runtime = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 5b8349b5d678cdcf72befebf77b8d4598d61b261..e658a30d368483d81aa343d6a4a7d2a7a416e000 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -13,22 +13,22 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # frame deps -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 
0af3899a666995120738459be823a70debd23168..f06c80b9a03213746768dbd670fe111c6512ef03 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -13,67 +13,66 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", "serde", -] } +], workspace = true } # frame -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental"] } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-executive = { workspace = true } # frame pallets -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } # primitives -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false, features = [ +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { features = [ "serde", -] } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = [ +], workspace = true } +sp-consensus-grandpa = { features = [ "serde", -] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [ +], workspace = true } +sp-core = { features = [ "serde", -] } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [ +], workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { features = [ "serde", -] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = 
"../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ +], workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { features = [ "serde", -] } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } +], workspace = true } +sp-genesis-builder = { workspace = true } # RPC related -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +frame-system-rpc-runtime-api = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # Used for runtime benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } # The pallet in this template. -pallet-template = { path = "../pallets/template", default-features = false } +pallet-template = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -109,7 +108,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index 93a56fb0ad78f738e12b5172d96d8188ca0b00e8..936ecd1bd388f35c45f6ec7350576b48f5641c56 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -3,6 +3,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +extern crate alloc; +use alloc::{boxed::Box, vec, vec::Vec}; use pallet_grandpa::AuthorityId as GrandpaId; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -13,12 +15,10 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::genesis_builder_helper::{build_state, get_preset}; pub use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -33,6 +33,10 @@ pub use frame_support::{ }, StorageValue, }; +use frame_support::{ + genesis_builder_helper::{build_state, get_preset}, + traits::VariantCountOf, +}; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; @@ -218,10 +222,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = 
VariantCountOf; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeHoldReason; } parameter_types! { @@ -367,7 +371,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index d790b4f5949ca5259bc4749636b90f5f33f1412c..94ba09421d409e5a2daee0c702499be3df2d4dcd 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -68,6 +68,7 @@ std = [ "pallet-asset-conversion?/std", "pallet-asset-rate?/std", "pallet-asset-tx-payment?/std", + "pallet-assets-freezer?/std", "pallet-assets?/std", "pallet-atomic-swap?/std", "pallet-aura?/std", @@ -239,7 +240,7 @@ std = [ "substrate-bip39?/std", "testnet-parachains-constants?/std", "westend-runtime-constants?/std", - "xcm-fee-payment-runtime-api?/std", + "xcm-runtime-apis?/std", ] runtime-benchmarks = [ "assets-common?/runtime-benchmarks", @@ -263,6 +264,7 @@ runtime-benchmarks = [ "pallet-asset-conversion?/runtime-benchmarks", "pallet-asset-rate?/runtime-benchmarks", "pallet-asset-tx-payment?/runtime-benchmarks", + "pallet-assets-freezer?/runtime-benchmarks", "pallet-assets?/runtime-benchmarks", "pallet-babe?/runtime-benchmarks", "pallet-bags-list?/runtime-benchmarks", @@ -363,7 +365,7 @@ runtime-benchmarks = [ "staging-node-inspect?/runtime-benchmarks", "staging-xcm-builder?/runtime-benchmarks", "staging-xcm-executor?/runtime-benchmarks", - "xcm-fee-payment-runtime-api?/runtime-benchmarks", + "xcm-runtime-apis?/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext?/try-runtime", @@ -385,6 +387,7 @@ try-runtime = [ "pallet-asset-conversion?/try-runtime", "pallet-asset-rate?/try-runtime", "pallet-asset-tx-payment?/try-runtime", + "pallet-assets-freezer?/try-runtime", "pallet-assets?/try-runtime", "pallet-atomic-swap?/try-runtime", "pallet-aura?/try-runtime", @@ -536,7 +539,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", 
"pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", 
"sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-fee-payment-runtime-api", "xcm-procedural"] +runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", 
"pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-procedural", "xcm-runtime-apis"] node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", 
"pallet-contracts-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", @@ -854,6 +857,11 @@ path = "../substrate/frame/assets" default-features = false optional = true +[dependencies.pallet-assets-freezer] +path = "../substrate/frame/assets-freezer" +default-features = false +optional = true + [dependencies.pallet-atomic-swap] path = "../substrate/frame/atomic-swap" default-features = false @@ -1724,13 +1732,13 @@ path = "../polkadot/runtime/westend/constants" default-features = false optional = true -[dependencies.xcm-fee-payment-runtime-api] -path = "../polkadot/xcm/xcm-fee-payment-runtime-api" 
+[dependencies.xcm-procedural] +path = "../polkadot/xcm/procedural" default-features = false optional = true -[dependencies.xcm-procedural] -path = "../polkadot/xcm/procedural" +[dependencies.xcm-runtime-apis] +path = "../polkadot/xcm/xcm-runtime-apis" default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 78b34ba179b7dd7fd7b08980d14e58d624c480dc..58a5691961d9b8abc01098ef75065d92881127db 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -360,6 +360,10 @@ pub use pallet_asset_tx_payment; #[cfg(feature = "pallet-assets")] pub use pallet_assets; +/// Provides freezing features to `pallet-assets`. +#[cfg(feature = "pallet-assets-freezer")] +pub use pallet_assets_freezer; + /// FRAME atomic swap pallet. #[cfg(feature = "pallet-atomic-swap")] pub use pallet_atomic_swap; @@ -1556,14 +1560,14 @@ pub use westend_runtime_constants; #[cfg(feature = "xcm-emulator")] pub use xcm_emulator; -/// XCM fee payment runtime API. -#[cfg(feature = "xcm-fee-payment-runtime-api")] -pub use xcm_fee_payment_runtime_api; - /// Procedural macros for XCM. #[cfg(feature = "xcm-procedural")] pub use xcm_procedural; +/// XCM runtime APIs. +#[cfg(feature = "xcm-runtime-apis")] +pub use xcm_runtime_apis; + /// Test kit to simulate cross-chain message passing and XCM execution. #[cfg(feature = "xcm-simulator")] pub use xcm_simulator;