diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml
index efcf278c46e83630a54fae3de01d0c9e19304dee..324c9bfff7a54cf594726328097f88bb23c9d951 100644
--- a/.github/workflows/fmt-check.yml
+++ b/.github/workflows/fmt-check.yml
@@ -15,7 +15,7 @@ jobs:
os: ["ubuntu-latest"]
runs-on: ${{ matrix.os }}
container:
- image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109
+ image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml
index 12891ef70af36b4e848c68e292f9ba2881ee20d2..430b1e26646758ef40218760fbd20f3a448f45f3 100644
--- a/.github/workflows/release-30_publish_release_draft.yml
+++ b/.github/workflows/release-30_publish_release_draft.yml
@@ -42,7 +42,6 @@ jobs:
URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb
wget $URL -O tera.deb
sudo dpkg -i tera.deb
- tera --version
- name: Download artifacts
uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
@@ -70,7 +69,7 @@ jobs:
export REF1=$(get_latest_release_tag)
if [[ -z "${{ inputs.version }}" ]]; then
- export REF2="${{ github.ref }}"
+ export REF2="${{ github.ref_name }}"
else
export REF2="${{ inputs.version }}"
fi
@@ -79,10 +78,6 @@ jobs:
./scripts/release/build-changelogs.sh
- echo "Checking the folder state"
- pwd
- ls -la scripts/release
-
- name: Archive artifact context.json
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with:
@@ -151,5 +146,5 @@ jobs:
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
- **New version of polkadot tagged**: ${{ github.ref }}
+ **New version of polkadot tagged**: ${{ github.ref_name }}
Draft release created: ${{ needs.publish-release-draft.outputs.release_url }}
diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index 5b036115b2386c366b2f1e78e9ce1dc7d526eedd..f1401406ae47afd3230cc163f35df0e3bcbac7b7 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -5,28 +5,41 @@ on:
- Review-Trigger
types:
- completed
+ workflow_dispatch:
+ inputs:
+ pr-number:
+ description: "Number of the PR to evaluate"
+ required: true
+ type: number
jobs:
review-approvals:
runs-on: ubuntu-latest
environment: master
steps:
+ - name: Generate token
+ id: app_token
+ uses: actions/create-github-app-token@v1.9.3
+ with:
+ app-id: ${{ secrets.REVIEW_APP_ID }}
+ private-key: ${{ secrets.REVIEW_APP_KEY }}
- name: Extract content of artifact
+ if: ${{ !inputs.pr-number }}
id: number
- uses: Bullrich/extract-text-from-artifact@v1.0.0
+ uses: Bullrich/extract-text-from-artifact@v1.0.1
with:
artifact-name: pr_number
- - name: Generate token
- id: app_token
- uses: tibdex/github-app-token@v1
- with:
- app_id: ${{ secrets.REVIEW_APP_ID }}
- private_key: ${{ secrets.REVIEW_APP_KEY }}
- name: "Evaluates PR reviews and assigns reviewers"
uses: paritytech/review-bot@v2.4.0
with:
repo-token: ${{ steps.app_token.outputs.token }}
team-token: ${{ steps.app_token.outputs.token }}
checks-token: ${{ steps.app_token.outputs.token }}
- pr-number: ${{ steps.number.outputs.content }}
+ # This is extracted from the triggering event
+ pr-number: ${{ inputs.pr-number || steps.number.outputs.content }}
request-reviewers: true
+ - name: Log payload
+ if: ${{ failure() || runner.debug }}
+ run: echo "::debug::$payload"
+ env:
+ payload: ${{ toJson(github.event) }}
diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 8b23dd30bb29ad7879543c064c3eb711cc87895d..ec4a62afc0c780fcb13e7bc73228bb2e77e6a582 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -21,6 +21,43 @@ jobs:
- name: Skip merge queue
if: ${{ contains(github.ref, 'gh-readonly-queue') }}
run: exit 0
+ - name: Get PR data
+ id: comments
+ run: |
+ echo "bodies=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+ echo "reviews=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews --jq '[.[].state]')" >> "$GITHUB_OUTPUT"
+ env:
+ GH_TOKEN: ${{ github.token }}
+ - name: Fail when author pushes new code
+ # Require new reviews when the author pushes new code and is not a member or contributor
+ if: |
+ contains(fromJson(steps.comments.outputs.reviews), 'APPROVED') &&
+ github.event_name == 'pull_request_target' &&
+ github.event.action == 'synchronize' &&
+ github.event.sender.login == github.event.pull_request.user.login &&
+ github.event.pull_request.author_association != 'CONTRIBUTOR' &&
+ github.event.pull_request.author_association != 'MEMBER'
+ run: |
+ echo "User's association is ${{ github.event.pull_request.author_association }}"
+ # We get the list of reviewers who approved the PR
+ REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews \
+ --jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}')
+
+ # We request them to review again
+ echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -
+
+ echo "::error::Project needs to be reviewed again"
+ exit 1
+ env:
+ GH_TOKEN: ${{ github.token }}
+ - name: Comment requirements
+ # If the previous step failed and github-actions hasn't commented yet, we comment instructions
+ if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
+ run: |
+ gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
+ env:
+ GH_TOKEN: ${{ github.token }}
+ COMMENTS: ${{ steps.comments.outputs.users }}
- name: Get PR number
env:
PR_NUMBER: ${{ github.event.pull_request.number }}
@@ -28,7 +65,7 @@ jobs:
echo "Saving PR number: $PR_NUMBER"
mkdir -p ./pr
echo $PR_NUMBER > ./pr/pr_number
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
name: Save PR number
with:
name: pr_number
diff --git a/.github/workflows/sync-templates.yml b/.github/workflows/sync-templates.yml
index 511c9d0e8cd06f7b4b7b16126d6565cae9047a00..3617d6c34a3e342e1abd52acaffbf3a8c61ec43b 100644
--- a/.github/workflows/sync-templates.yml
+++ b/.github/workflows/sync-templates.yml
@@ -61,7 +61,7 @@ jobs:
- name: Install toml-cli
run: cargo install --git https://github.com/gnprice/toml-cli --rev ea69e9d2ca4f0f858110dc7a5ae28bcb918c07fb # v0.2.3
- name: Install Polkadot SDK Version Manager
- run: cargo install --git https://github.com/paritytech/psvm --rev c41261ffb52ab0c115adbbdb17e2cb7900d2bdfd psvm # master
+ run: cargo install --git https://github.com/paritytech/psvm psvm
- name: Rust compilation prerequisites
run: |
sudo apt update
diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c8ce49cb462b07b135d33158408958aa88a2aa21
--- /dev/null
+++ b/.github/workflows/test-github-actions.yml
@@ -0,0 +1,46 @@
+name: test-github-actions
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened, ready_for_review]
+ merge_group:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+env:
+ CARGO_NET_GIT_FETCH_WITH_CLI: true
+
+jobs:
+ test-linux-stable-int:
+ runs-on: arc-runners-polkadot-sdk
+ timeout-minutes: 30
+ container:
+ image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
+ env:
+ RUSTFLAGS: "-C debug-assertions -D warnings"
+ RUST_BACKTRACE: 1
+ WASM_BUILD_NO_COLOR: 1
+ WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+ # Ensure we run the UI tests.
+ RUN_UI_TESTS: 1
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: script
+ run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored
+ quick-benchmarks:
+ runs-on: arc-runners-polkadot-sdk
+ timeout-minutes: 30
+ container:
+ image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
+ env:
+ RUSTFLAGS: "-C debug-assertions -D warnings"
+ RUST_BACKTRACE: "full"
+ WASM_BUILD_NO_COLOR: 1
+ WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: script
+ run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 89b2c00db9b2b13187a562987e00abcb232e0e32..6fb8a97fe95821886c416d97224fb21fd0f2897b 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -132,7 +132,6 @@ check-runtime-migration-westend:
WASM: "westend_runtime.compact.compressed.wasm"
URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443"
SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings"
- allow_failure: true
check-runtime-migration-rococo:
stage: check
diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml
index d8f5d5832291f7afced292d3b0fdeb6238de26a8..68712610ad2361601af3763485d1ab3e6c158682 100644
--- a/.gitlab/pipeline/publish.yml
+++ b/.gitlab/pipeline/publish.yml
@@ -74,6 +74,8 @@ publish-subsystem-benchmarks:
artifacts: true
- job: subsystem-benchmark-availability-distribution
artifacts: true
+ - job: subsystem-benchmark-approval-voting
+ artifacts: true
- job: publish-rustdoc
artifacts: false
script:
@@ -115,6 +117,8 @@ trigger_workflow:
artifacts: true
- job: subsystem-benchmark-availability-distribution
artifacts: true
+ - job: subsystem-benchmark-approval-voting
+ artifacts: true
script:
- echo "Triggering workflow"
- >
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index 1d6efd7b9fd1a91c3c49aa26faa9263216e9cb4e..c17a3ce35eaf54896e7d96e6cfefbb7edf53ced7 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -511,7 +511,7 @@ test-syscalls:
fi
allow_failure: false # this rarely triggers in practice
-subsystem-benchmark-availability-recovery:
+.subsystem-benchmark-template:
stage: test
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
@@ -523,26 +523,26 @@ subsystem-benchmark-availability-recovery:
- .docker-env
- .common-refs
- .run-immediately
- script:
- - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
tags:
- benchmark
+
+subsystem-benchmark-availability-recovery:
+ extends:
+ - .subsystem-benchmark-template
+ script:
+ - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
allow_failure: true
subsystem-benchmark-availability-distribution:
- stage: test
- artifacts:
- name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
- when: always
- expire_in: 1 hour
- paths:
- - charts/
extends:
- - .docker-env
- - .common-refs
- - .run-immediately
+ - .subsystem-benchmark-template
script:
- cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks
- tags:
- - benchmark
+ allow_failure: true
+
+subsystem-benchmark-approval-voting:
+ extends:
+ - .subsystem-benchmark-template
+ script:
+ - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks
allow_failure: true
diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml
index 4278f59b1e9a2e33f32bf255436d6af5d31b30fb..9d7a8b9311934a148e855caf7c4315d8a281aed1 100644
--- a/.gitlab/pipeline/zombienet/bridges.yml
+++ b/.gitlab/pipeline/zombienet/bridges.yml
@@ -55,9 +55,9 @@ zombienet-bridges-0001-asset-transfer-works:
- /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0001-asset-transfer --docker
- echo "Done"
-zombienet-bridges-0002-mandatory-headers-synced-while-idle:
+zombienet-bridges-0002-free-headers-synced-while-idle:
extends:
- .zombienet-bridges-common
script:
- - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-mandatory-headers-synced-while-idle --docker
+ - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-free-headers-synced-while-idle --docker
- echo "Done"
diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 6b72075c513b73c075d1dc10c90d0461bf0e1a82..38c5332f309703dab881d1df88709fc4fe95e49c 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -176,6 +176,14 @@ zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
--local-dir="${LOCAL_DIR}/elastic_scaling"
--test="0002-elastic-scaling-doesnt-break-parachains.zndsl"
+zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
+ extends:
+ - .zombienet-polkadot-common
+ script:
+ - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+ --local-dir="${LOCAL_DIR}/functional"
+ --test="0012-spam-statement-distribution-requests.zndsl"
+
zombienet-polkadot-smoke-0001-parachains-smoke-test:
extends:
- .zombienet-polkadot-common
diff --git a/Cargo.lock b/Cargo.lock
index 27cb7af04d63390df820dc87ca7922157bfb32ff..634a98f82f882642d4f89ce65b5ef8261579bad1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -42,15 +42,6 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
-[[package]]
-name = "aead"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877"
-dependencies = [
- "generic-array 0.14.7",
-]
-
[[package]]
name = "aead"
version = "0.5.2"
@@ -61,18 +52,6 @@ dependencies = [
"generic-array 0.14.7",
]
-[[package]]
-name = "aes"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8"
-dependencies = [
- "cfg-if",
- "cipher 0.3.0",
- "cpufeatures",
- "opaque-debug 0.3.0",
-]
-
[[package]]
name = "aes"
version = "0.8.3"
@@ -84,31 +63,17 @@ dependencies = [
"cpufeatures",
]
-[[package]]
-name = "aes-gcm"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f"
-dependencies = [
- "aead 0.4.3",
- "aes 0.7.5",
- "cipher 0.3.0",
- "ctr 0.7.0",
- "ghash 0.4.4",
- "subtle 2.5.0",
-]
-
[[package]]
name = "aes-gcm"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
dependencies = [
- "aead 0.5.2",
- "aes 0.8.3",
+ "aead",
+ "aes",
"cipher 0.4.4",
- "ctr 0.9.2",
- "ghash 0.5.0",
+ "ctr",
+ "ghash",
"subtle 2.5.0",
]
@@ -820,17 +785,22 @@ dependencies = [
"assert_matches",
"asset-hub-rococo-runtime",
"asset-test-utils",
+ "cumulus-pallet-parachain-system",
"emulated-integration-tests-common",
"frame-support",
"pallet-asset-conversion",
"pallet-assets",
"pallet-balances",
"pallet-message-queue",
+ "pallet-treasury",
+ "pallet-utility",
"pallet-xcm",
"parachains-common",
"parity-scale-codec",
"penpal-runtime",
+ "polkadot-runtime-common",
"rococo-runtime",
+ "rococo-runtime-constants",
"rococo-system-emulated-network",
"sp-runtime",
"staging-xcm",
@@ -866,6 +836,7 @@ dependencies = [
"hex-literal",
"log",
"pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-assets",
"pallet-aura",
@@ -916,6 +887,7 @@ dependencies = [
"staging-xcm-executor",
"substrate-wasm-builder",
"testnet-parachains-constants",
+ "xcm-fee-payment-runtime-api",
]
[[package]]
@@ -990,6 +962,7 @@ dependencies = [
"hex-literal",
"log",
"pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-assets",
"pallet-aura",
@@ -1037,6 +1010,7 @@ dependencies = [
"substrate-wasm-builder",
"testnet-parachains-constants",
"westend-runtime-constants",
+ "xcm-fee-payment-runtime-api",
]
[[package]]
@@ -1108,7 +1082,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
dependencies = [
"concurrent-queue",
- "event-listener 2.5.3",
+ "event-listener",
"futures-core",
]
@@ -1118,7 +1092,7 @@ version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb"
dependencies = [
- "async-lock 2.8.0",
+ "async-lock",
"async-task",
"concurrent-queue",
"fastrand 1.9.0",
@@ -1132,7 +1106,7 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06"
dependencies = [
- "async-lock 2.8.0",
+ "async-lock",
"autocfg",
"blocking",
"futures-lite",
@@ -1147,7 +1121,7 @@ dependencies = [
"async-channel",
"async-executor",
"async-io",
- "async-lock 2.8.0",
+ "async-lock",
"blocking",
"futures-lite",
"once_cell",
@@ -1159,7 +1133,7 @@ version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
dependencies = [
- "async-lock 2.8.0",
+ "async-lock",
"autocfg",
"cfg-if",
"concurrent-queue",
@@ -1179,18 +1153,7 @@ version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
dependencies = [
- "event-listener 2.5.3",
-]
-
-[[package]]
-name = "async-lock"
-version = "3.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b"
-dependencies = [
- "event-listener 4.0.3",
- "event-listener-strategy",
- "pin-project-lite 0.2.12",
+ "event-listener",
]
[[package]]
@@ -1212,11 +1175,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9"
dependencies = [
"async-io",
- "async-lock 2.8.0",
+ "async-lock",
"autocfg",
"blocking",
"cfg-if",
- "event-listener 2.5.3",
+ "event-listener",
"futures-lite",
"rustix 0.37.23",
"signal-hook",
@@ -1233,7 +1196,7 @@ dependencies = [
"async-channel",
"async-global-executor",
"async-io",
- "async-lock 2.8.0",
+ "async-lock",
"crossbeam-utils",
"futures-channel",
"futures-core",
@@ -1386,7 +1349,7 @@ dependencies = [
"rand_chacha 0.3.1",
"rand_core 0.6.4",
"ring 0.1.0",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sp-ark-bls12-381",
"sp-ark-ed-on-bls12-381-bandersnatch",
"zeroize",
@@ -1646,7 +1609,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65"
dependencies = [
"async-channel",
- "async-lock 2.8.0",
+ "async-lock",
"async-task",
"atomic-waker",
"fastrand 1.9.0",
@@ -2146,6 +2109,7 @@ dependencies = [
"static_assertions",
"substrate-wasm-builder",
"testnet-parachains-constants",
+ "tuplex",
]
[[package]]
@@ -2215,7 +2179,6 @@ dependencies = [
"pallet-message-queue",
"pallet-xcm",
"parachains-common",
- "parity-scale-codec",
"rococo-westend-system-emulated-network",
"sp-runtime",
"staging-xcm",
@@ -2305,6 +2268,7 @@ dependencies = [
"static_assertions",
"substrate-wasm-builder",
"testnet-parachains-constants",
+ "tuplex",
"westend-runtime-constants",
]
@@ -2343,6 +2307,7 @@ dependencies = [
"staging-xcm",
"staging-xcm-builder",
"static_assertions",
+ "tuplex",
]
[[package]]
@@ -2531,18 +2496,6 @@ dependencies = [
"keystream",
]
-[[package]]
-name = "chacha20"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6"
-dependencies = [
- "cfg-if",
- "cipher 0.3.0",
- "cpufeatures",
- "zeroize",
-]
-
[[package]]
name = "chacha20"
version = "0.9.1"
@@ -2556,14 +2509,14 @@ dependencies = [
[[package]]
name = "chacha20poly1305"
-version = "0.9.1"
+version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5"
+checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35"
dependencies = [
- "aead 0.4.3",
- "chacha20 0.8.2",
- "cipher 0.3.0",
- "poly1305 0.7.2",
+ "aead",
+ "chacha20",
+ "cipher 0.4.4",
+ "poly1305",
"zeroize",
]
@@ -2643,15 +2596,6 @@ dependencies = [
"generic-array 0.14.7",
]
-[[package]]
-name = "cipher"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7"
-dependencies = [
- "generic-array 0.14.7",
-]
-
[[package]]
name = "cipher"
version = "0.4.4"
@@ -2660,6 +2604,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
dependencies = [
"crypto-common",
"inout",
+ "zeroize",
]
[[package]]
@@ -2830,6 +2775,36 @@ dependencies = [
"testnet-parachains-constants",
]
+[[package]]
+name = "collectives-westend-integration-tests"
+version = "1.0.0"
+dependencies = [
+ "assert_matches",
+ "asset-hub-westend-runtime",
+ "collectives-westend-runtime",
+ "cumulus-pallet-parachain-system",
+ "cumulus-pallet-xcmp-queue",
+ "emulated-integration-tests-common",
+ "frame-support",
+ "pallet-asset-rate",
+ "pallet-assets",
+ "pallet-balances",
+ "pallet-message-queue",
+ "pallet-treasury",
+ "pallet-utility",
+ "pallet-xcm",
+ "parachains-common",
+ "parity-scale-codec",
+ "polkadot-runtime-common",
+ "sp-runtime",
+ "staging-xcm",
+ "staging-xcm-executor",
+ "testnet-parachains-constants",
+ "westend-runtime",
+ "westend-runtime-constants",
+ "westend-system-emulated-network",
+]
+
[[package]]
name = "collectives-westend-runtime"
version = "3.0.0"
@@ -3637,15 +3612,6 @@ dependencies = [
"subtle 2.5.0",
]
-[[package]]
-name = "ctr"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481"
-dependencies = [
- "cipher 0.3.0",
-]
-
[[package]]
name = "ctr"
version = "0.9.2"
@@ -5034,7 +5000,7 @@ dependencies = [
"ed25519 2.2.2",
"rand_core 0.6.4",
"serde",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"subtle 2.5.0",
"zeroize",
]
@@ -5064,7 +5030,7 @@ dependencies = [
"hashbrown 0.14.3",
"hex",
"rand_core 0.6.4",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"zeroize",
]
@@ -5356,27 +5322,6 @@ version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
-[[package]]
-name = "event-listener"
-version = "4.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite 0.2.12",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
-dependencies = [
- "event-listener 4.0.3",
- "pin-project-lite 0.2.12",
-]
-
[[package]]
name = "exit-future"
version = "0.2.0"
@@ -6356,16 +6301,6 @@ dependencies = [
"rand_core 0.6.4",
]
-[[package]]
-name = "ghash"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
-dependencies = [
- "opaque-debug 0.3.0",
- "polyval 0.5.3",
-]
-
[[package]]
name = "ghash"
version = "0.5.0"
@@ -6373,7 +6308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40"
dependencies = [
"opaque-debug 0.3.0",
- "polyval 0.6.1",
+ "polyval",
]
[[package]]
@@ -7071,7 +7006,7 @@ dependencies = [
"curl",
"curl-sys",
"encoding_rs",
- "event-listener 2.5.3",
+ "event-listener",
"futures-lite",
"http",
"log",
@@ -7147,9 +7082,9 @@ dependencies = [
[[package]]
name = "jsonrpsee"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702"
+checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad"
dependencies = [
"jsonrpsee-core",
"jsonrpsee-http-client",
@@ -7163,9 +7098,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-client-transport"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455fc882e56f58228df2aee36b88a1340eafd707c76af2fa68cf94b37d461131"
+checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa"
dependencies = [
"futures-util",
"http",
@@ -7184,12 +7119,11 @@ dependencies = [
[[package]]
name = "jsonrpsee-core"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1"
+checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d"
dependencies = [
"anyhow",
- "async-lock 3.3.0",
"async-trait",
"beef",
"futures-timer",
@@ -7210,9 +7144,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-http-client"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e7a95e346f55df84fb167b7e06470e196e7d5b9488a21d69c5d9732043ba7ba"
+checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5"
dependencies = [
"async-trait",
"hyper",
@@ -7230,9 +7164,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-proc-macros"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014"
+checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0"
dependencies = [
"heck 0.4.1",
"proc-macro-crate 3.0.0",
@@ -7243,9 +7177,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-server"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0"
+checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41"
dependencies = [
"futures-util",
"http",
@@ -7267,9 +7201,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-types"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368"
+checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d"
dependencies = [
"anyhow",
"beef",
@@ -7280,9 +7214,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-ws-client"
-version = "0.22.2"
+version = "0.22.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68ca71e74983f624c0cb67828e480a981586074da8ad3a2f214c6a3f884edab9"
+checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070"
dependencies = [
"http",
"jsonrpsee-client-transport",
@@ -7302,7 +7236,7 @@ dependencies = [
"elliptic-curve",
"once_cell",
"serdect",
- "sha2 0.10.7",
+ "sha2 0.10.8",
]
[[package]]
@@ -7348,6 +7282,7 @@ dependencies = [
"node-primitives",
"pallet-alliance",
"pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-asset-rate",
"pallet-asset-tx-payment",
@@ -7537,9 +7472,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
[[package]]
name = "libc"
-version = "0.2.152"
+version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libflate"
@@ -7732,7 +7667,7 @@ dependencies = [
"multihash 0.17.0",
"quick-protobuf",
"rand 0.8.5",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"thiserror",
"zeroize",
]
@@ -7757,7 +7692,7 @@ dependencies = [
"log",
"quick-protobuf",
"rand 0.8.5",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"smallvec",
"thiserror",
"uint",
@@ -7815,7 +7750,7 @@ dependencies = [
"once_cell",
"quick-protobuf",
"rand 0.8.5",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"snow",
"static_assertions",
"thiserror",
@@ -8150,7 +8085,7 @@ dependencies = [
[[package]]
name = "litep2p"
version = "0.3.0"
-source = "git+https://github.com/paritytech/litep2p?branch=master#b142c9eb611fb2fe78d2830266a3675b37299ceb"
+source = "git+https://github.com/paritytech/litep2p?rev=e03a6023882db111beeb24d8c0ceaac0721d3f0f#e03a6023882db111beeb24d8c0ceaac0721d3f0f"
dependencies = [
"async-trait",
"bs58 0.4.0",
@@ -8177,7 +8112,7 @@ dependencies = [
"ring 0.16.20",
"rustls 0.20.8",
"serde",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"simple-dns",
"smallvec",
"snow",
@@ -8508,6 +8443,19 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+[[package]]
+name = "minimal-template"
+version = "0.0.0"
+dependencies = [
+ "docify",
+ "minimal-template-node",
+ "minimal-template-runtime",
+ "pallet-minimal-template",
+ "polkadot-sdk-docs",
+ "polkadot-sdk-frame",
+ "simple-mermaid",
+]
+
[[package]]
name = "minimal-template-node"
version = "0.0.0"
@@ -8713,7 +8661,7 @@ dependencies = [
"core2",
"digest 0.10.7",
"multihash-derive 0.8.0",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"unsigned-varint",
]
@@ -8730,7 +8678,7 @@ dependencies = [
"core2",
"digest 0.10.7",
"multihash-derive 0.8.0",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"unsigned-varint",
]
@@ -8760,7 +8708,7 @@ dependencies = [
"ripemd",
"serde",
"sha1",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"strobe-rs",
]
@@ -9499,6 +9447,7 @@ dependencies = [
"frame-benchmarking",
"frame-support",
"frame-system",
+ "log",
"pallet-assets",
"pallet-balances",
"parity-scale-codec",
@@ -9512,6 +9461,27 @@ dependencies = [
"sp-std 14.0.0",
]
+[[package]]
+name = "pallet-asset-conversion-ops"
+version = "0.1.0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-asset-conversion",
+ "pallet-assets",
+ "pallet-balances",
+ "parity-scale-codec",
+ "primitive-types",
+ "scale-info",
+ "sp-arithmetic",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std 14.0.0",
+]
+
[[package]]
name = "pallet-asset-conversion-tx-payment"
version = "10.0.0"
@@ -9571,7 +9541,7 @@ dependencies = [
[[package]]
name = "pallet-assets"
-version = "29.0.0"
+version = "29.1.0"
dependencies = [
"frame-benchmarking",
"frame-support",
@@ -9936,7 +9906,9 @@ dependencies = [
"frame-benchmarking",
"frame-support",
"frame-system",
+ "log",
"parity-scale-codec",
+ "pretty_assertions",
"scale-info",
"sp-api",
"sp-arithmetic",
@@ -11715,7 +11687,6 @@ dependencies = [
"polkadot-primitives",
"polkadot-runtime-common",
"scale-info",
- "sp-core",
"sp-io",
"sp-runtime",
"sp-std 14.0.0",
@@ -11784,6 +11755,7 @@ dependencies = [
"cumulus-primitives-core",
"cumulus-primitives-parachain-inherent",
"cumulus-relay-chain-interface",
+ "docify",
"frame-benchmarking",
"frame-benchmarking-cli",
"futures",
@@ -11836,9 +11808,11 @@ dependencies = [
"cumulus-pallet-session-benchmarking",
"cumulus-pallet-xcm",
"cumulus-pallet-xcmp-queue",
+ "cumulus-primitives-aura",
"cumulus-primitives-core",
"cumulus-primitives-storage-weight-reclaim",
"cumulus-primitives-utility",
+ "docify",
"frame-benchmarking",
"frame-executive",
"frame-support",
@@ -12491,7 +12465,7 @@ checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48"
dependencies = [
"once_cell",
"pest",
- "sha2 0.10.7",
+ "sha2 0.10.8",
]
[[package]]
@@ -12956,6 +12930,7 @@ dependencies = [
"polkadot-overseer",
"polkadot-primitives",
"polkadot-primitives-test-helpers",
+ "polkadot-subsystem-bench",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
@@ -13241,7 +13216,6 @@ dependencies = [
"slotmap",
"sp-core",
"sp-maybe-compressed-blob",
- "sp-wasm-interface 20.0.0",
"tempfile",
"test-parachain-adder",
"test-parachain-halt",
@@ -13278,7 +13252,6 @@ name = "polkadot-node-core-pvf-common"
version = "7.0.0"
dependencies = [
"assert_matches",
- "cfg-if",
"cpu-time",
"futures",
"landlock",
@@ -13895,8 +13868,11 @@ dependencies = [
name = "polkadot-sdk-docs"
version = "0.0.1"
dependencies = [
+ "cumulus-client-service",
"cumulus-pallet-aura-ext",
"cumulus-pallet-parachain-system",
+ "cumulus-primitives-proof-size-hostfunction",
+ "cumulus-primitives-storage-weight-reclaim",
"docify",
"frame-executive",
"frame-support",
@@ -13934,9 +13910,11 @@ dependencies = [
"sc-consensus-grandpa",
"sc-consensus-manual-seal",
"sc-consensus-pow",
+ "sc-executor",
"sc-network",
"sc-rpc",
"sc-rpc-api",
+ "sc-service",
"scale-info",
"simple-mermaid",
"sp-api",
@@ -14271,6 +14249,7 @@ dependencies = [
"polkadot-node-core-pvf-common",
"polkadot-node-core-pvf-execute-worker",
"polkadot-node-core-pvf-prepare-worker",
+ "polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
@@ -14288,7 +14267,6 @@ dependencies = [
name = "polkadot-test-runtime"
version = "1.0.0"
dependencies = [
- "bitvec",
"frame-election-provider-support",
"frame-executive",
"frame-support",
@@ -14313,16 +14291,12 @@ dependencies = [
"pallet-vesting",
"pallet-xcm",
"parity-scale-codec",
- "polkadot-parachain-primitives",
"polkadot-primitives",
"polkadot-runtime-common",
"polkadot-runtime-parachains",
- "rustc-hex",
"scale-info",
"serde",
- "serde_derive",
"serde_json",
- "smallvec",
"sp-api",
"sp-authority-discovery",
"sp-block-builder",
@@ -14511,17 +14485,6 @@ dependencies = [
"windows-sys 0.48.0",
]
-[[package]]
-name = "poly1305"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede"
-dependencies = [
- "cpufeatures",
- "opaque-debug 0.3.0",
- "universal-hash 0.4.0",
-]
-
[[package]]
name = "poly1305"
version = "0.8.0"
@@ -14530,19 +14493,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
dependencies = [
"cpufeatures",
"opaque-debug 0.3.0",
- "universal-hash 0.5.1",
-]
-
-[[package]]
-name = "polyval"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1"
-dependencies = [
- "cfg-if",
- "cpufeatures",
- "opaque-debug 0.3.0",
- "universal-hash 0.4.0",
+ "universal-hash",
]
[[package]]
@@ -14554,7 +14505,7 @@ dependencies = [
"cfg-if",
"cpufeatures",
"opaque-debug 0.3.0",
- "universal-hash 0.5.1",
+ "universal-hash",
]
[[package]]
@@ -17403,6 +17354,7 @@ dependencies = [
"sc-transaction-pool",
"sc-transaction-pool-api",
"sc-utils",
+ "schnellru",
"serde",
"serde_json",
"sp-api",
@@ -17832,7 +17784,7 @@ version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0"
dependencies = [
- "aead 0.5.2",
+ "aead",
"arrayref",
"arrayvec 0.7.4",
"curve25519-dalek 4.1.2",
@@ -17840,7 +17792,7 @@ dependencies = [
"merlin",
"rand_core 0.6.4",
"serde_bytes",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"subtle 2.5.0",
"zeroize",
]
@@ -18253,9 +18205,9 @@ dependencies = [
[[package]]
name = "sha2"
-version = "0.10.7"
+version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8"
+checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -18455,7 +18407,7 @@ dependencies = [
"async-executor",
"async-fs",
"async-io",
- "async-lock 2.8.0",
+ "async-lock",
"async-net",
"async-process",
"blocking",
@@ -18478,18 +18430,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0bb30cf57b7b5f6109ce17c3164445e2d6f270af2cb48f6e4d31c2967c9a9f5"
dependencies = [
"arrayvec 0.7.4",
- "async-lock 2.8.0",
+ "async-lock",
"atomic-take",
"base64 0.21.2",
"bip39",
"blake2-rfc",
"bs58 0.5.0",
- "chacha20 0.9.1",
+ "chacha20",
"crossbeam-queue",
"derive_more",
"ed25519-zebra 4.0.3",
"either",
- "event-listener 2.5.3",
+ "event-listener",
"fnv",
"futures-lite",
"futures-util",
@@ -18506,14 +18458,14 @@ dependencies = [
"num-traits",
"pbkdf2",
"pin-project",
- "poly1305 0.8.0",
+ "poly1305",
"rand 0.8.5",
"rand_chacha 0.3.1",
"ruzstd",
"schnorrkel 0.10.2",
"serde",
"serde_json",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"siphasher",
"slab",
@@ -18532,12 +18484,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "256b5bad1d6b49045e95fe87492ce73d5af81545d8b4d8318a872d2007024c33"
dependencies = [
"async-channel",
- "async-lock 2.8.0",
+ "async-lock",
"base64 0.21.2",
"blake2-rfc",
"derive_more",
"either",
- "event-listener 2.5.3",
+ "event-listener",
"fnv",
"futures-channel",
"futures-lite",
@@ -18569,18 +18521,18 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"
[[package]]
name = "snow"
-version = "0.9.3"
+version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155"
+checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85"
dependencies = [
- "aes-gcm 0.9.2",
+ "aes-gcm",
"blake2 0.10.6",
"chacha20poly1305",
"curve25519-dalek 4.1.2",
"rand_core 0.6.4",
- "ring 0.16.20",
+ "ring 0.17.7",
"rustc_version 0.4.0",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"subtle 2.5.0",
]
@@ -19428,7 +19380,7 @@ dependencies = [
"byteorder",
"criterion 0.4.0",
"digest 0.10.7",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"sp-crypto-hashing-proc-macro",
"twox-hash",
@@ -19842,14 +19794,14 @@ dependencies = [
name = "sp-statement-store"
version = "10.0.0"
dependencies = [
- "aes-gcm 0.10.3",
+ "aes-gcm",
"curve25519-dalek 4.1.2",
"ed25519-dalek 2.1.0",
"hkdf",
"parity-scale-codec",
"rand 0.8.5",
"scale-info",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sp-api",
"sp-application-crypto",
"sp-core",
@@ -20393,9 +20345,9 @@ dependencies = [
[[package]]
name = "str0m"
-version = "0.2.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee48572247f422dcbe68630c973f8296fbd5157119cd36a3223e48bf83d47727"
+checksum = "d3f10d3f68e60168d81110410428a435dbde28cc5525f5f7c6fdec92dbdc2800"
dependencies = [
"combine",
"crc",
@@ -20546,7 +20498,7 @@ dependencies = [
"pbkdf2",
"rustc-hex",
"schnorrkel 0.11.4",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"zeroize",
]
@@ -21212,11 +21164,8 @@ version = "1.0.0"
dependencies = [
"frame-support",
"polkadot-primitives",
- "polkadot-runtime-common",
"smallvec",
- "sp-core",
"sp-runtime",
- "sp-weights",
]
[[package]]
@@ -21994,6 +21943,12 @@ dependencies = [
"utf-8",
]
+[[package]]
+name = "tuplex"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa"
+
[[package]]
name = "twox-hash"
version = "1.6.3"
@@ -22081,16 +22036,6 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
-[[package]]
-name = "universal-hash"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402"
-dependencies = [
- "generic-array 0.14.7",
- "subtle 2.5.0",
-]
-
[[package]]
name = "universal-hash"
version = "0.5.1"
@@ -22245,7 +22190,7 @@ dependencies = [
"rand 0.8.5",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"sha3",
"thiserror",
"zeroize",
@@ -22563,7 +22508,7 @@ dependencies = [
"log",
"rustix 0.36.15",
"serde",
- "sha2 0.10.7",
+ "sha2 0.10.8",
"toml 0.5.11",
"windows-sys 0.45.0",
"zstd 0.11.2+zstd.1.5.2",
diff --git a/Cargo.toml b/Cargo.toml
index 460c49f7f37c2d43e021e4bfa8f4263ac1ea3063..1d3f3d8e9ecd137493e72370734378fac9b19a87 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -103,6 +103,7 @@ members = [
"cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend",
"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo",
"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend",
+ "cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend",
"cumulus/parachains/integration-tests/emulated/tests/people/people-rococo",
"cumulus/parachains/integration-tests/emulated/tests/people/people-westend",
"cumulus/parachains/pallets/collective-content",
@@ -300,6 +301,7 @@ members = [
"substrate/frame",
"substrate/frame/alliance",
"substrate/frame/asset-conversion",
+ "substrate/frame/asset-conversion/ops",
"substrate/frame/asset-rate",
"substrate/frame/assets",
"substrate/frame/atomic-swap",
@@ -511,6 +513,7 @@ members = [
"substrate/utils/substrate-bip39",
"substrate/utils/wasm-builder",
+ "templates/minimal",
"templates/minimal/node",
"templates/minimal/pallets/template",
"templates/minimal/runtime",
diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml
index 67b91a16a302d6214830241082b21c407b04c6d1..74049031afe63cf0d2bc95193541a2b1303a1bbf 100644
--- a/bridges/bin/runtime-common/Cargo.toml
+++ b/bridges/bin/runtime-common/Cargo.toml
@@ -16,6 +16,7 @@ hash-db = { version = "0.16.0", default-features = false }
log = { workspace = true }
scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
static_assertions = { version = "1.1", optional = true }
+tuplex = { version = "0.1", default-features = false }
# Bridge dependencies
@@ -82,6 +83,7 @@ std = [
"sp-runtime/std",
"sp-std/std",
"sp-trie/std",
+ "tuplex/std",
"xcm-builder/std",
"xcm/std",
]
diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
index 4b0c052df8008410cb531c21d173ead2c4fdd450..2c152aef68226aee36e791a882b5859427a9a33d 100644
--- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
+++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
@@ -18,55 +18,229 @@
//! obsolete (duplicated) data or do not pass some additional pallet-specific
//! checks.
-use crate::messages_call_ext::MessagesCallSubType;
-use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType;
-use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype;
-use sp_runtime::transaction_validity::TransactionValidity;
+use crate::{
+ extensions::refund_relayer_extension::RefundableParachainId,
+ messages_call_ext::MessagesCallSubType,
+};
+use bp_relayers::ExplicitOrAccountParams;
+use bp_runtime::Parachain;
+use pallet_bridge_grandpa::{
+ BridgedBlockNumber, CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper,
+};
+use pallet_bridge_parachains::{
+ CallSubType as ParachainsCallSubtype, SubmitParachainHeadsHelper, SubmitParachainHeadsInfo,
+};
+use pallet_bridge_relayers::Pallet as RelayersPallet;
+use sp_runtime::{
+ traits::{Get, PhantomData, UniqueSaturatedInto},
+ transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder},
+};
/// A duplication of the `FilterCall` trait.
///
/// We need this trait in order to be able to implement it for the messages pallet,
/// since the implementation is done outside of the pallet crate.
-pub trait BridgeRuntimeFilterCall {
- /// Checks if a runtime call is valid.
- fn validate(call: &Call) -> TransactionValidity;
+pub trait BridgeRuntimeFilterCall {
+ /// Data that may be passed from the validate to `post_dispatch`.
+ type ToPostDispatch;
+ /// Called during validation. Needs to checks whether a runtime call, submitted
+ /// by the `who` is valid. `who` may be `None` if transaction is not signed
+ /// by a regular account.
+ fn validate(who: &AccountId, call: &Call) -> (Self::ToPostDispatch, TransactionValidity);
+ /// Called after transaction is dispatched.
+ fn post_dispatch(_who: &AccountId, _has_failed: bool, _to_post_dispatch: Self::ToPostDispatch) {
+ }
+}
+
+/// Wrapper for the bridge GRANDPA pallet that checks calls for obsolete submissions
+/// and also boosts transaction priority if it has submitted by registered relayer.
+/// The boost is computed as
+/// `(BundledHeaderNumber - 1 - BestFinalizedHeaderNumber) * Priority::get()`.
+/// The boost is only applied if submitter has active registration in the relayers
+/// pallet.
+pub struct CheckAndBoostBridgeGrandpaTransactions(
+ PhantomData<(T, I, Priority, SlashAccount)>,
+);
+
+impl, SlashAccount: Get>
+ BridgeRuntimeFilterCall
+ for CheckAndBoostBridgeGrandpaTransactions
+where
+ T: pallet_bridge_relayers::Config + pallet_bridge_grandpa::Config,
+ T::RuntimeCall: GrandpaCallSubType,
+{
+ // bridged header number, bundled in transaction
+ type ToPostDispatch = Option>;
+
+ fn validate(
+ who: &T::AccountId,
+ call: &T::RuntimeCall,
+ ) -> (Self::ToPostDispatch, TransactionValidity) {
+ match GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) {
+ Ok(Some(our_tx)) => {
+ let to_post_dispatch = Some(our_tx.base.block_number);
+ let total_priority_boost =
+ compute_priority_boost::(who, our_tx.improved_by);
+ (
+ to_post_dispatch,
+ ValidTransactionBuilder::default().priority(total_priority_boost).build(),
+ )
+ },
+ Ok(None) => (None, ValidTransactionBuilder::default().build()),
+ Err(e) => (None, Err(e)),
+ }
+ }
+
+ fn post_dispatch(
+ relayer: &T::AccountId,
+ has_failed: bool,
+ bundled_block_number: Self::ToPostDispatch,
+ ) {
+ // we are only interested in associated pallet submissions
+ let Some(bundled_block_number) = bundled_block_number else { return };
+ // we are only interested in failed or unneeded transactions
+ let has_failed =
+ has_failed || !SubmitFinalityProofHelper::::was_successful(bundled_block_number);
+
+ if !has_failed {
+ return
+ }
+
+ // let's slash registered relayer
+ RelayersPallet::::slash_and_deregister(
+ relayer,
+ ExplicitOrAccountParams::Explicit(SlashAccount::get()),
+ );
+ }
+}
+
+/// Wrapper for the bridge parachains pallet that checks calls for obsolete submissions
+/// and also boosts transaction priority if it has submitted by registered relayer.
+/// The boost is computed as
+/// `(BundledHeaderNumber - 1 - BestKnownHeaderNumber) * Priority::get()`.
+/// The boost is only applied if submitter has active registration in the relayers
+/// pallet.
+pub struct CheckAndBoostBridgeParachainsTransactions(
+ PhantomData<(T, RefPara, Priority, SlashAccount)>,
+);
+
+impl, SlashAccount: Get>
+ BridgeRuntimeFilterCall
+ for CheckAndBoostBridgeParachainsTransactions
+where
+ T: pallet_bridge_relayers::Config + pallet_bridge_parachains::Config,
+ RefPara: RefundableParachainId,
+ T::RuntimeCall: ParachainsCallSubtype,
+{
+ // bridged header number, bundled in transaction
+ type ToPostDispatch = Option;
+
+ fn validate(
+ who: &T::AccountId,
+ call: &T::RuntimeCall,
+ ) -> (Self::ToPostDispatch, TransactionValidity) {
+ match ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(
+ call,
+ ) {
+ Ok(Some(our_tx)) if our_tx.base.para_id.0 == RefPara::BridgedChain::PARACHAIN_ID => {
+ let to_post_dispatch = Some(our_tx.base);
+ let total_priority_boost =
+ compute_priority_boost::(&who, our_tx.improved_by);
+ (
+ to_post_dispatch,
+ ValidTransactionBuilder::default().priority(total_priority_boost).build(),
+ )
+ },
+ Ok(_) => (None, ValidTransactionBuilder::default().build()),
+ Err(e) => (None, Err(e)),
+ }
+ }
+
+ fn post_dispatch(relayer: &T::AccountId, has_failed: bool, maybe_update: Self::ToPostDispatch) {
+ // we are only interested in associated pallet submissions
+ let Some(update) = maybe_update else { return };
+ // we are only interested in failed or unneeded transactions
+ let has_failed = has_failed ||
+ !SubmitParachainHeadsHelper::::was_successful(&update);
+
+ if !has_failed {
+ return
+ }
+
+ // let's slash registered relayer
+ RelayersPallet::::slash_and_deregister(
+ relayer,
+ ExplicitOrAccountParams::Explicit(SlashAccount::get()),
+ );
+ }
}
-impl BridgeRuntimeFilterCall for pallet_bridge_grandpa::Pallet
+impl BridgeRuntimeFilterCall
+ for pallet_bridge_grandpa::Pallet
where
T: pallet_bridge_grandpa::Config,
T::RuntimeCall: GrandpaCallSubType,
{
- fn validate(call: &T::RuntimeCall) -> TransactionValidity {
- GrandpaCallSubType::::check_obsolete_submit_finality_proof(call)
+ type ToPostDispatch = ();
+ fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) {
+ (
+ (),
+ GrandpaCallSubType::::check_obsolete_submit_finality_proof(call)
+ .and_then(|_| ValidTransactionBuilder::default().build()),
+ )
}
}
-impl BridgeRuntimeFilterCall
+impl BridgeRuntimeFilterCall
for pallet_bridge_parachains::Pallet
where
T: pallet_bridge_parachains::Config,
T::RuntimeCall: ParachainsCallSubtype,
{
- fn validate(call: &T::RuntimeCall) -> TransactionValidity {
- ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call)
+ type ToPostDispatch = ();
+ fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) {
+ (
+ (),
+ ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call)
+ .and_then(|_| ValidTransactionBuilder::default().build()),
+ )
}
}
-impl, I: 'static> BridgeRuntimeFilterCall
- for pallet_bridge_messages::Pallet
+impl, I: 'static>
+ BridgeRuntimeFilterCall for pallet_bridge_messages::Pallet
where
T::RuntimeCall: MessagesCallSubType,
{
+ type ToPostDispatch = ();
/// Validate messages in order to avoid "mining" messages delivery and delivery confirmation
/// transactions, that are delivering outdated messages/confirmations. Without this validation,
/// even honest relayers may lose their funds if there are multiple relays running and
/// submitting the same messages/confirmations.
- fn validate(call: &T::RuntimeCall) -> TransactionValidity {
- call.check_obsolete_call()
+ fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) {
+ ((), call.check_obsolete_call())
}
}
+/// Computes priority boost that improved known header by `improved_by`
+fn compute_priority_boost(
+ relayer: &T::AccountId,
+ improved_by: N,
+) -> TransactionPriority
+where
+ T: pallet_bridge_relayers::Config,
+ N: UniqueSaturatedInto,
+ Priority: Get,
+{
+ // we only boost priority if relayer has staked required balance
+ let is_relayer_registration_active = RelayersPallet::::is_registration_active(relayer);
+ // if tx improves by just one, there's no need to bump its priority
+ let improved_by: TransactionPriority = improved_by.unique_saturated_into().saturating_sub(1);
+ // if relayer is registered, for every skipped header we improve by `Priority`
+ let boost_per_header = if is_relayer_registration_active { Priority::get() } else { 0 };
+ improved_by.saturating_mul(boost_per_header)
+}
+
/// Declares a runtime-specific `BridgeRejectObsoleteHeadersAndMessages` signed extension.
///
/// ## Example
@@ -92,7 +266,15 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages {
type AccountId = $account_id;
type Call = $call;
type AdditionalSigned = ();
- type Pre = ();
+ type Pre = (
+ $account_id,
+ ( $(
+ <$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<
+ $account_id,
+ $call,
+ >>::ToPostDispatch,
+ )* ),
+ );
fn additional_signed(&self) -> sp_std::result::Result<
(),
@@ -101,29 +283,72 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages {
Ok(())
}
+ #[allow(unused_variables)]
fn validate(
&self,
- _who: &Self::AccountId,
+ who: &Self::AccountId,
call: &Self::Call,
_info: &sp_runtime::traits::DispatchInfoOf,
_len: usize,
) -> sp_runtime::transaction_validity::TransactionValidity {
- let valid = sp_runtime::transaction_validity::ValidTransaction::default();
+ let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default();
+ let to_prepare = ();
$(
- let valid = valid
- .combine_with(<$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<$call>>::validate(call)?);
+ let (from_validate, call_filter_validity) = <
+ $filter_call as
+ $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<
+ Self::AccountId,
+ $call,
+ >>::validate(&who, call);
+ let tx_validity = tx_validity.combine_with(call_filter_validity?);
)*
- Ok(valid)
+ Ok(tx_validity)
}
+ #[allow(unused_variables)]
fn pre_dispatch(
self,
- who: &Self::AccountId,
+ relayer: &Self::AccountId,
call: &Self::Call,
info: &sp_runtime::traits::DispatchInfoOf,
len: usize,
) -> Result {
- self.validate(who, call, info, len).map(drop)
+ use tuplex::PushBack;
+ let to_post_dispatch = ();
+ $(
+ let (from_validate, call_filter_validity) = <
+ $filter_call as
+ $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<
+ $account_id,
+ $call,
+ >>::validate(&relayer, call);
+ let _ = call_filter_validity?;
+ let to_post_dispatch = to_post_dispatch.push_back(from_validate);
+ )*
+ Ok((relayer.clone(), to_post_dispatch))
+ }
+
+ #[allow(unused_variables)]
+ fn post_dispatch(
+ to_post_dispatch: Option,
+ info: &sp_runtime::traits::DispatchInfoOf,
+ post_info: &sp_runtime::traits::PostDispatchInfoOf,
+ len: usize,
+ result: &sp_runtime::DispatchResult,
+ ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> {
+ use tuplex::PopFront;
+ let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) };
+ let has_failed = result.is_err();
+ $(
+ let (item, to_post_dispatch) = to_post_dispatch.pop_front();
+ <
+ $filter_call as
+ $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<
+ $account_id,
+ $call,
+ >>::post_dispatch(&relayer, has_failed, item);
+ )*
+ Ok(())
}
}
};
@@ -132,10 +357,23 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages {
#[cfg(test)]
mod tests {
use super::*;
+ use crate::{
+ extensions::refund_relayer_extension::{
+ tests::{
+ initialize_environment, relayer_account_at_this_chain,
+ submit_parachain_head_call_ex, submit_relay_header_call_ex,
+ },
+ RefundableParachain,
+ },
+ mock::*,
+ };
+ use bp_polkadot_core::parachains::ParaId;
+ use bp_runtime::HeaderId;
use frame_support::{assert_err, assert_ok};
use sp_runtime::{
- traits::SignedExtension,
+ traits::{ConstU64, SignedExtension},
transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction},
+ DispatchError,
};
pub struct MockCall {
@@ -143,7 +381,7 @@ mod tests {
}
impl sp_runtime::traits::Dispatchable for MockCall {
- type RuntimeOrigin = ();
+ type RuntimeOrigin = u64;
type Config = ();
type Info = ();
type PostInfo = ();
@@ -156,50 +394,287 @@ mod tests {
}
}
- struct FirstFilterCall;
- impl BridgeRuntimeFilterCall for FirstFilterCall {
- fn validate(call: &MockCall) -> TransactionValidity {
+ pub struct FirstFilterCall;
+ impl FirstFilterCall {
+ fn post_dispatch_called_with(success: bool) {
+ frame_support::storage::unhashed::put(&[1], &success);
+ }
+
+ fn verify_post_dispatch_called_with(success: bool) {
+ assert_eq!(frame_support::storage::unhashed::get::(&[1]), Some(success));
+ }
+ }
+
+ impl BridgeRuntimeFilterCall for FirstFilterCall {
+ type ToPostDispatch = u64;
+ fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) {
if call.data <= 1 {
- return InvalidTransaction::Custom(1).into()
+ return (1, InvalidTransaction::Custom(1).into())
}
- Ok(ValidTransaction { priority: 1, ..Default::default() })
+ (1, Ok(ValidTransaction { priority: 1, ..Default::default() }))
+ }
+
+ fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) {
+ Self::post_dispatch_called_with(!has_failed);
+ assert_eq!(to_post_dispatch, 1);
+ }
+ }
+
+ pub struct SecondFilterCall;
+
+ impl SecondFilterCall {
+ fn post_dispatch_called_with(success: bool) {
+ frame_support::storage::unhashed::put(&[2], &success);
+ }
+
+ fn verify_post_dispatch_called_with(success: bool) {
+ assert_eq!(frame_support::storage::unhashed::get::(&[2]), Some(success));
}
}
- struct SecondFilterCall;
- impl BridgeRuntimeFilterCall for SecondFilterCall {
- fn validate(call: &MockCall) -> TransactionValidity {
+ impl BridgeRuntimeFilterCall for SecondFilterCall {
+ type ToPostDispatch = u64;
+ fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) {
if call.data <= 2 {
- return InvalidTransaction::Custom(2).into()
+ return (2, InvalidTransaction::Custom(2).into())
}
- Ok(ValidTransaction { priority: 2, ..Default::default() })
+ (2, Ok(ValidTransaction { priority: 2, ..Default::default() }))
+ }
+
+ fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) {
+ Self::post_dispatch_called_with(!has_failed);
+ assert_eq!(to_post_dispatch, 2);
}
}
#[test]
- fn test() {
+ fn test_generated_obsolete_extension() {
generate_bridge_reject_obsolete_headers_and_messages!(
MockCall,
- (),
+ u64,
FirstFilterCall,
SecondFilterCall
);
- assert_err!(
- BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0),
- InvalidTransaction::Custom(1)
- );
+ run_test(|| {
+ assert_err!(
+ BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 1 }, &(), 0),
+ InvalidTransaction::Custom(1)
+ );
+ assert_err!(
+ BridgeRejectObsoleteHeadersAndMessages.pre_dispatch(
+ &42,
+ &MockCall { data: 1 },
+ &(),
+ 0
+ ),
+ InvalidTransaction::Custom(1)
+ );
- assert_err!(
- BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0),
- InvalidTransaction::Custom(2)
- );
+ assert_err!(
+ BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 2 }, &(), 0),
+ InvalidTransaction::Custom(2)
+ );
+ assert_err!(
+ BridgeRejectObsoleteHeadersAndMessages.pre_dispatch(
+ &42,
+ &MockCall { data: 2 },
+ &(),
+ 0
+ ),
+ InvalidTransaction::Custom(2)
+ );
- assert_ok!(
- BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0),
- ValidTransaction { priority: 3, ..Default::default() }
- )
+ assert_eq!(
+ BridgeRejectObsoleteHeadersAndMessages
+ .validate(&42, &MockCall { data: 3 }, &(), 0)
+ .unwrap(),
+ ValidTransaction { priority: 3, ..Default::default() },
+ );
+ assert_eq!(
+ BridgeRejectObsoleteHeadersAndMessages
+ .pre_dispatch(&42, &MockCall { data: 3 }, &(), 0)
+ .unwrap(),
+ (42, (1, 2)),
+ );
+
+ // when post_dispatch is called with `Ok(())`, it is propagated to all "nested"
+ // extensions
+ assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch(
+ Some((0, (1, 2))),
+ &(),
+ &(),
+ 0,
+ &Ok(())
+ ));
+ FirstFilterCall::verify_post_dispatch_called_with(true);
+ SecondFilterCall::verify_post_dispatch_called_with(true);
+
+ // when post_dispatch is called with `Err(())`, it is propagated to all "nested"
+ // extensions
+ assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch(
+ Some((0, (1, 2))),
+ &(),
+ &(),
+ 0,
+ &Err(DispatchError::BadOrigin)
+ ));
+ FirstFilterCall::verify_post_dispatch_called_with(false);
+ SecondFilterCall::verify_post_dispatch_called_with(false);
+ });
+ }
+
+ frame_support::parameter_types! {
+ pub SlashDestination: ThisChainAccountId = 42;
+ }
+
+ type BridgeGrandpaWrapper =
+ CheckAndBoostBridgeGrandpaTransactions, SlashDestination>;
+
+ #[test]
+ fn grandpa_wrapper_does_not_boost_extensions_for_unregistered_relayer() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+
+ let priority_boost = BridgeGrandpaWrapper::validate(
+ &relayer_account_at_this_chain(),
+ &submit_relay_header_call_ex(200),
+ )
+ .1
+ .unwrap()
+ .priority;
+ assert_eq!(priority_boost, 0);
+ })
+ }
+
+ #[test]
+ fn grandpa_wrapper_boosts_extensions_for_registered_relayer() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ let priority_boost = BridgeGrandpaWrapper::validate(
+ &relayer_account_at_this_chain(),
+ &submit_relay_header_call_ex(200),
+ )
+ .1
+ .unwrap()
+ .priority;
+ assert_eq!(priority_boost, 99_000);
+ })
+ }
+
+ #[test]
+ fn grandpa_wrapper_slashes_registered_relayer_if_transaction_fails() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), true, Some(150));
+ assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ })
+ }
+
+ #[test]
+ fn grandpa_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), false, Some(100));
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ })
+ }
+
+ type BridgeParachainsWrapper = CheckAndBoostBridgeParachainsTransactions<
+ TestRuntime,
+ RefundableParachain<(), BridgedUnderlyingParachain>,
+ ConstU64<1_000>,
+ SlashDestination,
+ >;
+
+ #[test]
+ fn parachains_wrapper_does_not_boost_extensions_for_unregistered_relayer() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+
+ let priority_boost = BridgeParachainsWrapper::validate(
+ &relayer_account_at_this_chain(),
+ &submit_parachain_head_call_ex(200),
+ )
+ .1
+ .unwrap()
+ .priority;
+ assert_eq!(priority_boost, 0);
+ })
+ }
+
+ #[test]
+ fn parachains_wrapper_boosts_extensions_for_registered_relayer() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ let priority_boost = BridgeParachainsWrapper::validate(
+ &relayer_account_at_this_chain(),
+ &submit_parachain_head_call_ex(200),
+ )
+ .1
+ .unwrap()
+ .priority;
+ assert_eq!(priority_boost, 99_000);
+ })
+ }
+
+ #[test]
+ fn parachains_wrapper_slashes_registered_relayer_if_transaction_fails() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ BridgeParachainsWrapper::post_dispatch(
+ &relayer_account_at_this_chain(),
+ true,
+ Some(SubmitParachainHeadsInfo {
+ at_relay_block: HeaderId(150, Default::default()),
+ para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID),
+ para_head_hash: [150u8; 32].into(),
+ is_free_execution_expected: false,
+ }),
+ );
+ assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ })
+ }
+
+ #[test]
+ fn parachains_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() {
+ run_test(|| {
+ initialize_environment(100, 100, 100);
+ BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+ .unwrap();
+
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ BridgeParachainsWrapper::post_dispatch(
+ &relayer_account_at_this_chain(),
+ false,
+ Some(SubmitParachainHeadsInfo {
+ at_relay_block: HeaderId(100, Default::default()),
+ para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID),
+ para_head_hash: [100u8; 32].into(),
+ is_free_execution_expected: false,
+ }),
+ );
+ assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain()));
+ })
}
}
diff --git a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs
index 5035553f508dfea94a0cb5ddf9b916dd7d9b4ea5..92810290f95e77a7fdc04cafaa1e6ab290e1661a 100644
--- a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs
+++ b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs
@@ -22,7 +22,6 @@
//! single message with nonce `N`, then the transaction with nonces `N..=N+100` will
//! be rejected. This can lower bridge throughput down to one message per block.
-use bp_messages::MessageNonce;
use frame_support::traits::Get;
use sp_runtime::transaction_validity::TransactionPriority;
@@ -30,16 +29,19 @@ use sp_runtime::transaction_validity::TransactionPriority;
#[allow(unused_imports)]
pub use integrity_tests::*;
-/// Compute priority boost for message delivery transaction that delivers
-/// given number of messages.
-pub fn compute_priority_boost(
- messages: MessageNonce,
-) -> TransactionPriority
+/// We'll deal with different bridge items here - messages, headers, ...
+/// To avoid being too verbose with generic code, let's just define a separate alias.
+pub type ItemCount = u64;
+
+/// Compute priority boost for transaction that brings given number of bridge
+/// items (messages, headers, ...), when every additional item adds `PriorityBoostPerItem`
+/// to transaction priority.
+pub fn compute_priority_boost(n_items: ItemCount) -> TransactionPriority
where
- PriorityBoostPerMessage: Get,
+ PriorityBoostPerItem: Get,
{
- // we don't want any boost for transaction with single message => minus one
- PriorityBoostPerMessage::get().saturating_mul(messages.saturating_sub(1))
+ // we don't want any boost for transaction with single (additional) item => minus one
+ PriorityBoostPerItem::get().saturating_mul(n_items.saturating_sub(1))
}
#[cfg(not(feature = "integrity-test"))]
@@ -47,7 +49,8 @@ mod integrity_tests {}
#[cfg(feature = "integrity-test")]
mod integrity_tests {
- use super::compute_priority_boost;
+ use super::{compute_priority_boost, ItemCount};
+ use crate::extensions::refund_relayer_extension::RefundableParachainId;
use bp_messages::MessageNonce;
use bp_runtime::PreComputedSize;
@@ -55,7 +58,6 @@ mod integrity_tests {
dispatch::{DispatchClass, DispatchInfo, Pays, PostDispatchInfo},
traits::Get,
};
- use pallet_bridge_messages::WeightInfoExt;
use pallet_transaction_payment::OnChargeTransaction;
use sp_runtime::{
traits::{Dispatchable, UniqueSaturatedInto, Zero},
@@ -68,37 +70,33 @@ mod integrity_tests {
T,
>>::Balance;
- /// Ensures that the value of `PriorityBoostPerMessage` matches the value of
- /// `tip_boost_per_message`.
+ /// Ensures that the value of `PriorityBoostPerItem` matches the value of
+ /// `tip_boost_per_item`.
///
- /// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have almost
- /// the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want to be sure
- /// that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the priority will be close
+ /// We want two transactions, `TX1` with `N` items and `TX2` with `N+1` items, have almost
+ /// the same priority if we'll add `tip_boost_per_item` tip to the `TX1`. We want to be sure
+ /// that if we add plain `PriorityBoostPerItem` priority to `TX1`, the priority will be close
/// to `TX2` as well.
- pub fn ensure_priority_boost_is_sane(
- tip_boost_per_message: BalanceOf,
+ fn ensure_priority_boost_is_sane(
+ param_name: &str,
+ max_items: ItemCount,
+ tip_boost_per_item: Balance,
+ estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority,
) where
- Runtime:
- pallet_transaction_payment::Config + pallet_bridge_messages::Config,
- MessagesInstance: 'static,
- PriorityBoostPerMessage: Get,
- Runtime::RuntimeCall: Dispatchable,
- BalanceOf: Send + Sync + FixedPointOperand,
+ PriorityBoostPerItem: Get,
+ ItemCount: UniqueSaturatedInto,
+ Balance: FixedPointOperand + Zero,
{
- let priority_boost_per_message = PriorityBoostPerMessage::get();
- let maximal_messages_in_delivery_transaction =
- Runtime::MaxUnconfirmedMessagesAtInboundLane::get();
- for messages in 1..=maximal_messages_in_delivery_transaction {
- let base_priority = estimate_message_delivery_transaction_priority::<
- Runtime,
- MessagesInstance,
- >(messages, Zero::zero());
- let priority_boost = compute_priority_boost::(messages);
- let priority_with_boost = base_priority + priority_boost;
-
- let tip = tip_boost_per_message.saturating_mul((messages - 1).unique_saturated_into());
- let priority_with_tip =
- estimate_message_delivery_transaction_priority::(1, tip);
+ let priority_boost_per_item = PriorityBoostPerItem::get();
+ for n_items in 1..=max_items {
+ let base_priority = estimate_priority(n_items, Zero::zero());
+ let priority_boost = compute_priority_boost::(n_items);
+ let priority_with_boost = base_priority
+ .checked_add(priority_boost)
+ .expect("priority overflow: try lowering `max_items` or `tip_boost_per_item`?");
+
+ let tip = tip_boost_per_item.saturating_mul((n_items - 1).unique_saturated_into());
+ let priority_with_tip = estimate_priority(1, tip);
const ERROR_MARGIN: TransactionPriority = 5; // 5%
if priority_with_boost.abs_diff(priority_with_tip).saturating_mul(100) /
@@ -106,97 +104,304 @@ mod integrity_tests {
ERROR_MARGIN
{
panic!(
- "The PriorityBoostPerMessage value ({}) must be fixed to: {}",
- priority_boost_per_message,
- compute_priority_boost_per_message::(
- tip_boost_per_message
+ "The {param_name} value ({}) must be fixed to: {}",
+ priority_boost_per_item,
+ compute_priority_boost_per_item(
+ max_items,
+ tip_boost_per_item,
+ estimate_priority
),
);
}
}
}
- /// Compute priority boost that we give to message delivery transaction for additional message.
+ /// Compute priority boost that we give to bridge transaction for every
+ /// additional bridge item.
#[cfg(feature = "integrity-test")]
- fn compute_priority_boost_per_message(
- tip_boost_per_message: BalanceOf,
+ fn compute_priority_boost_per_item(
+ max_items: ItemCount,
+ tip_boost_per_item: Balance,
+ estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority,
) -> TransactionPriority
where
- Runtime:
- pallet_transaction_payment::Config + pallet_bridge_messages::Config,
- MessagesInstance: 'static,
- Runtime::RuntimeCall: Dispatchable,
- BalanceOf: Send + Sync + FixedPointOperand,
+ ItemCount: UniqueSaturatedInto,
+ Balance: FixedPointOperand + Zero,
{
- // estimate priority of transaction that delivers one message and has large tip
- let maximal_messages_in_delivery_transaction =
- Runtime::MaxUnconfirmedMessagesAtInboundLane::get();
+ // estimate priority of transaction that delivers one item and has large tip
let small_with_tip_priority =
- estimate_message_delivery_transaction_priority::(
- 1,
- tip_boost_per_message
- .saturating_mul(maximal_messages_in_delivery_transaction.saturated_into()),
- );
- // estimate priority of transaction that delivers maximal number of messages, but has no tip
- let large_without_tip_priority = estimate_message_delivery_transaction_priority::<
- Runtime,
- MessagesInstance,
- >(maximal_messages_in_delivery_transaction, Zero::zero());
+ estimate_priority(1, tip_boost_per_item.saturating_mul(max_items.saturated_into()));
+ // estimate priority of transaction that delivers maximal number of items, but has no tip
+ let large_without_tip_priority = estimate_priority(max_items, Zero::zero());
small_with_tip_priority
.saturating_sub(large_without_tip_priority)
- .saturating_div(maximal_messages_in_delivery_transaction - 1)
+ .saturating_div(max_items - 1)
}
- /// Estimate message delivery transaction priority.
- #[cfg(feature = "integrity-test")]
- fn estimate_message_delivery_transaction_priority(
- messages: MessageNonce,
- tip: BalanceOf,
- ) -> TransactionPriority
- where
- Runtime:
- pallet_transaction_payment::Config + pallet_bridge_messages::Config,
- MessagesInstance: 'static,
- Runtime::RuntimeCall: Dispatchable,
- BalanceOf: Send + Sync + FixedPointOperand,
- {
- // just an estimation of extra transaction bytes that are added to every transaction
- // (including signature, signed extensions extra and etc + in our case it includes
- // all call arguments except the proof itself)
- let base_tx_size = 512;
- // let's say we are relaying similar small messages and for every message we add more trie
- // nodes to the proof (x0.5 because we expect some nodes to be reused)
- let estimated_message_size = 512;
- // let's say all our messages have the same dispatch weight
- let estimated_message_dispatch_weight =
- Runtime::WeightInfo::message_dispatch_weight(estimated_message_size);
- // messages proof argument size is (for every message) messages size + some additional
- // trie nodes. Some of them are reused by different messages, so let's take 2/3 of default
- // "overhead" constant
- let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size()
- .saturating_mul(2)
- .saturating_div(3)
- .saturating_add(estimated_message_size)
- .saturating_mul(messages as _);
-
- // finally we are able to estimate transaction size and weight
- let transaction_size = base_tx_size.saturating_add(messages_proof_size);
- let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight(
- &PreComputedSize(transaction_size as _),
- messages as _,
- estimated_message_dispatch_weight.saturating_mul(messages),
- );
-
- pallet_transaction_payment::ChargeTransactionPayment::::get_priority(
- &DispatchInfo {
- weight: transaction_weight,
- class: DispatchClass::Normal,
- pays_fee: Pays::Yes,
- },
- transaction_size as _,
- tip,
- Zero::zero(),
- )
+ /// Computations, specific to bridge relay chains transactions.
+ pub mod per_relay_header {
+ use super::*;
+
+ use bp_header_chain::{
+ max_expected_submit_finality_proof_arguments_size, ChainWithGrandpa,
+ };
+ use pallet_bridge_grandpa::WeightInfoExt;
+
+ /// Ensures that the value of `PriorityBoostPerHeader` matches the value of
+ /// `tip_boost_per_header`.
+ ///
+ /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have
+ /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want
+ /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority
+ /// will be close to `TX2` as well.
+ pub fn ensure_priority_boost_is_sane(
+ tip_boost_per_header: BalanceOf,
+ ) where
+ Runtime:
+ pallet_transaction_payment::Config + pallet_bridge_grandpa::Config,
+ GrandpaInstance: 'static,
+ PriorityBoostPerHeader: Get,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ // the meaning of `max_items` here is different when comparing with message
+ // transactions - with messages we have a strict limit on maximal number of
+ // messages we can fit into a single transaction. With headers, current best
+ // header may be improved by any "number of items". But this number is only
+ // used to verify priority boost, so it should be fine to select this arbitrary
+ // value - it SHALL NOT affect any value, it just adds more tests for the value.
+ let maximal_improved_by = 4_096;
+ super::ensure_priority_boost_is_sane::>(
+ "PriorityBoostPerRelayHeader",
+ maximal_improved_by,
+ tip_boost_per_header,
+ |_n_headers, tip| {
+ estimate_relay_header_submit_transaction_priority::(
+ tip,
+ )
+ },
+ );
+ }
+
+ /// Estimate relay header delivery transaction priority.
+ #[cfg(feature = "integrity-test")]
+ fn estimate_relay_header_submit_transaction_priority(
+ tip: BalanceOf,
+ ) -> TransactionPriority
+ where
+ Runtime:
+ pallet_transaction_payment::Config + pallet_bridge_grandpa::Config,
+ GrandpaInstance: 'static,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ // just an estimation of extra transaction bytes that are added to every transaction
+ // (including signature, signed extensions extra and etc + in our case it includes
+ // all call arguments except the proof itself)
+ let base_tx_size = 512;
+ // let's say we are relaying largest relay chain headers
+ let tx_call_size = max_expected_submit_finality_proof_arguments_size::<
+ Runtime::BridgedChain,
+ >(true, Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1);
+
+ // finally we are able to estimate transaction size and weight
+ let transaction_size = base_tx_size.saturating_add(tx_call_size);
+ let transaction_weight = Runtime::WeightInfo::submit_finality_proof_weight(
+ Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1,
+ Runtime::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY,
+ );
+
+ pallet_transaction_payment::ChargeTransactionPayment::::get_priority(
+ &DispatchInfo {
+ weight: transaction_weight,
+ class: DispatchClass::Normal,
+ pays_fee: Pays::Yes,
+ },
+ transaction_size as _,
+ tip,
+ Zero::zero(),
+ )
+ }
+ }
+
+ /// Computations, specific to bridge parachains transactions.
+ pub mod per_parachain_header {
+ use super::*;
+
+ use bp_runtime::Parachain;
+ use pallet_bridge_parachains::WeightInfoExt;
+
+ /// Ensures that the value of `PriorityBoostPerHeader` matches the value of
+ /// `tip_boost_per_header`.
+ ///
+ /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have
+ /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want
+ /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority
+ /// will be close to `TX2` as well.
+ pub fn ensure_priority_boost_is_sane(
+ tip_boost_per_header: BalanceOf,
+ ) where
+ Runtime: pallet_transaction_payment::Config
+ + pallet_bridge_parachains::Config,
+ RefundableParachain: RefundableParachainId,
+ PriorityBoostPerHeader: Get,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ // the meaning of `max_items` here is different when comparing with message
+ // transactions - with messages we have a strict limit on maximal number of
+ // messages we can fit into a single transaction. With headers, current best
+ // header may be improved by any "number of items". But this number is only
+ // used to verify priority boost, so it should be fine to select this arbitrary
+ // value - it SHALL NOT affect any value, it just adds more tests for the value.
+ let maximal_improved_by = 4_096;
+ super::ensure_priority_boost_is_sane::>(
+ "PriorityBoostPerParachainHeader",
+ maximal_improved_by,
+ tip_boost_per_header,
+ |_n_headers, tip| {
+ estimate_parachain_header_submit_transaction_priority::<
+ Runtime,
+ RefundableParachain,
+ >(tip)
+ },
+ );
+ }
+
+ /// Estimate parachain header delivery transaction priority.
+ #[cfg(feature = "integrity-test")]
+ fn estimate_parachain_header_submit_transaction_priority(
+ tip: BalanceOf,
+ ) -> TransactionPriority
+ where
+ Runtime: pallet_transaction_payment::Config
+ + pallet_bridge_parachains::Config,
+ RefundableParachain: RefundableParachainId,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ // just an estimation of extra transaction bytes that are added to every transaction
+ // (including signature, signed extensions extra and etc + in our case it includes
+ // all call arguments except the proof itself)
+ let base_tx_size = 512;
+ // let's say we are relaying largest parachain headers and proof takes some more bytes
+ let tx_call_size = >::WeightInfo::expected_extra_storage_proof_size()
+ .saturating_add(RefundableParachain::BridgedChain::MAX_HEADER_SIZE);
+
+ // finally we are able to estimate transaction size and weight
+ let transaction_size = base_tx_size.saturating_add(tx_call_size);
+ let transaction_weight = >::WeightInfo::submit_parachain_heads_weight(
+ Runtime::DbWeight::get(),
+ &PreComputedSize(transaction_size as _),
+ // just one parachain - all other submissions won't receive any boost
+ 1,
+ );
+
+ pallet_transaction_payment::ChargeTransactionPayment::::get_priority(
+ &DispatchInfo {
+ weight: transaction_weight,
+ class: DispatchClass::Normal,
+ pays_fee: Pays::Yes,
+ },
+ transaction_size as _,
+ tip,
+ Zero::zero(),
+ )
+ }
+ }
+
+ /// Computations, specific to bridge messages transactions.
+ pub mod per_message {
+ use super::*;
+
+ use pallet_bridge_messages::WeightInfoExt;
+
+ /// Ensures that the value of `PriorityBoostPerMessage` matches the value of
+ /// `tip_boost_per_message`.
+ ///
+ /// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have
+ /// almost the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want
+ /// to be sure that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the
+ /// priority will be close to `TX2` as well.
+ pub fn ensure_priority_boost_is_sane(
+ tip_boost_per_message: BalanceOf,
+ ) where
+ Runtime: pallet_transaction_payment::Config
+ + pallet_bridge_messages::Config,
+ MessagesInstance: 'static,
+ PriorityBoostPerMessage: Get,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ let maximal_messages_in_delivery_transaction =
+ Runtime::MaxUnconfirmedMessagesAtInboundLane::get();
+ super::ensure_priority_boost_is_sane::>(
+ "PriorityBoostPerMessage",
+ maximal_messages_in_delivery_transaction,
+ tip_boost_per_message,
+ |n_messages, tip| {
+ estimate_message_delivery_transaction_priority::(
+ n_messages, tip,
+ )
+ },
+ );
+ }
+
+ /// Estimate message delivery transaction priority.
+ #[cfg(feature = "integrity-test")]
+ fn estimate_message_delivery_transaction_priority(
+ messages: MessageNonce,
+ tip: BalanceOf,
+ ) -> TransactionPriority
+ where
+ Runtime: pallet_transaction_payment::Config
+ + pallet_bridge_messages::Config,
+ MessagesInstance: 'static,
+ Runtime::RuntimeCall: Dispatchable,
+ BalanceOf: Send + Sync + FixedPointOperand,
+ {
+ // just an estimation of extra transaction bytes that are added to every transaction
+ // (including signature, signed extensions extra and etc + in our case it includes
+ // all call arguments except the proof itself)
+ let base_tx_size = 512;
+ // let's say we are relaying similar small messages and for every message we add more
+ // trie nodes to the proof (x0.5 because we expect some nodes to be reused)
+ let estimated_message_size = 512;
+ // let's say all our messages have the same dispatch weight
+ let estimated_message_dispatch_weight =
+ Runtime::WeightInfo::message_dispatch_weight(estimated_message_size);
+ // messages proof argument size is (for every message) messages size + some additional
+ // trie nodes. Some of them are reused by different messages, so let's take 2/3 of
+ // default "overhead" constant
+ let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size()
+ .saturating_mul(2)
+ .saturating_div(3)
+ .saturating_add(estimated_message_size)
+ .saturating_mul(messages as _);
+
+ // finally we are able to estimate transaction size and weight
+ let transaction_size = base_tx_size.saturating_add(messages_proof_size);
+ let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight(
+ &PreComputedSize(transaction_size as _),
+ messages as _,
+ estimated_message_dispatch_weight.saturating_mul(messages),
+ );
+
+ pallet_transaction_payment::ChargeTransactionPayment::::get_priority(
+ &DispatchInfo {
+ weight: transaction_weight,
+ class: DispatchClass::Normal,
+ pays_fee: Pays::Yes,
+ },
+ transaction_size as _,
+ tip,
+ Zero::zero(),
+ )
+ }
}
}
diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs
index 64ae1d0b669f2ea8fdfba0df73752a9b0f6e8aec..5aa7f1c095d540a4ee5050aeb7d694c98b744683 100644
--- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs
+++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs
@@ -24,7 +24,7 @@ use crate::messages_call_ext::{
};
use bp_messages::{LaneId, MessageNonce};
use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams};
-use bp_runtime::{Chain, Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider};
+use bp_runtime::{Parachain, RangeInclusiveExt, StaticStrProvider};
use codec::{Codec, Decode, Encode};
use frame_support::{
dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo},
@@ -33,8 +33,7 @@ use frame_support::{
CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
};
use pallet_bridge_grandpa::{
- CallSubType as GrandpaCallSubType, Config as GrandpaConfig, SubmitFinalityProofHelper,
- SubmitFinalityProofInfo,
+ CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo,
};
use pallet_bridge_messages::Config as MessagesConfig;
use pallet_bridge_parachains::{
@@ -66,20 +65,9 @@ type CallOf = ::RuntimeCall;
/// coming from this parachain.
pub trait RefundableParachainId {
/// The instance of the bridge parachains pallet.
- type Instance;
+ type Instance: 'static;
/// The parachain Id.
- type Id: Get;
-}
-
-/// Default implementation of `RefundableParachainId`.
-pub struct DefaultRefundableParachainId(PhantomData<(Instance, Id)>);
-
-impl RefundableParachainId for DefaultRefundableParachainId
-where
- Id: Get,
-{
- type Instance = Instance;
- type Id = Id;
+ type BridgedChain: Parachain;
}
/// Implementation of `RefundableParachainId` for `trait Parachain`.
@@ -87,10 +75,11 @@ pub struct RefundableParachain(PhantomData<(Instance, Para)>);
impl RefundableParachainId for RefundableParachain
where
+ Instance: 'static,
Para: Parachain,
{
type Instance = Instance;
- type Id = ParachainIdOf;
+ type BridgedChain = Para;
}
/// Trait identifying a bridged messages lane. A relayer might be refunded for delivering messages
@@ -242,17 +231,10 @@ pub enum RelayerAccountAction {
/// Everything common among our refund signed extensions.
pub trait RefundSignedExtension:
'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo
-where
- >::BridgedChain:
- Chain,
{
/// This chain runtime.
- type Runtime: UtilityConfig>
- + GrandpaConfig
- + MessagesConfig<::Instance>
+ type Runtime: MessagesConfig<::Instance>
+ RelayersConfig;
- /// Grandpa pallet reference.
- type GrandpaInstance: 'static;
/// Messages pallet and lane reference.
type Msgs: RefundableMessagesLaneId;
/// Refund amount calculator.
@@ -276,11 +258,13 @@ where
call: &CallOf,
) -> Result<&CallOf, TransactionValidityError>;
- /// Called from post-dispatch and shall perform additional checks (apart from relay
- /// chain finality and messages transaction finality) of given call result.
+ /// Called from post-dispatch and shall perform additional checks (apart from messages
+ /// transaction success) of given call result.
fn additional_call_result_check(
relayer: &AccountIdOf,
call_info: &CallInfo,
+ extra_weight: &mut Weight,
+ extra_size: &mut u32,
) -> bool;
/// Given post-dispatch information, analyze the outcome of relayer call and return
@@ -348,35 +332,6 @@ where
return slash_relayer_if_delivery_result
}
- // check if relay chain state has been updated
- if let Some(finality_proof_info) = call_info.submit_finality_proof_info() {
- if !SubmitFinalityProofHelper::::was_successful(
- finality_proof_info.block_number,
- ) {
- // we only refund relayer if all calls have updated chain state
- log::trace!(
- target: "runtime::bridge",
- "{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof",
- Self::Id::STR,
- ::Id::get(),
- relayer,
- );
- return slash_relayer_if_delivery_result
- }
-
- // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll`
- // transaction. If relay chain header is mandatory, the GRANDPA pallet returns
- // `Pays::No`, because such transaction is mandatory for operating the bridge. But
- // `utility.batchAll` transaction always requires payment. But in both cases we'll
- // refund relayer - either explicitly here, or using `Pays::No` if he's choosing
- // to submit dedicated transaction.
-
- // submitter has means to include extra weight/bytes in the `submit_finality_proof`
- // call, so let's subtract extra weight/size to avoid refunding for this extra stuff
- extra_weight = finality_proof_info.extra_weight;
- extra_size = finality_proof_info.extra_size;
- }
-
// Check if the `ReceiveMessagesProof` call delivered at least some of the messages that
// it contained. If this happens, we consider the transaction "helpful" and refund it.
let msgs_call_info = call_info.messages_call_info();
@@ -391,8 +346,13 @@ where
return slash_relayer_if_delivery_result
}
- // do additional check
- if !Self::additional_call_result_check(&relayer, &call_info) {
+ // do additional checks
+ if !Self::additional_call_result_check(
+ &relayer,
+ &call_info,
+ &mut extra_weight,
+ &mut extra_size,
+ ) {
return slash_relayer_if_delivery_result
}
@@ -468,18 +428,11 @@ where
RuntimeDebugNoBound,
TypeInfo,
)]
-pub struct RefundSignedExtensionAdapter(T)
-where
- >::BridgedChain:
- Chain;
+pub struct RefundSignedExtensionAdapter(T);
impl SignedExtension for RefundSignedExtensionAdapter
where
- >::BridgedChain:
- Chain,
CallOf: Dispatchable
- + IsSubType, T::Runtime>>
- + GrandpaCallSubType
+ MessagesCallSubType::Instance>,
{
const IDENTIFIER: &'static str = T::Id::STR;
@@ -644,6 +597,14 @@ impl RefundSignedExtension
for RefundBridgedParachainMessages
where
Self: 'static + Send + Sync,
+ RefundBridgedGrandpaMessages<
+ Runtime,
+ Runtime::BridgesGrandpaPalletInstance,
+ Msgs,
+ Refund,
+ Priority,
+ Id,
+ >: 'static + Send + Sync,
Runtime: UtilityConfig>
+ BoundedBridgeGrandpaConfig
+ ParachainsConfig
@@ -661,7 +622,6 @@ where
+ MessagesCallSubType,
{
type Runtime = Runtime;
- type GrandpaInstance = Runtime::BridgesGrandpaPalletInstance;
type Msgs = Msgs;
type Refund = Refund;
type Priority = Priority;
@@ -687,7 +647,7 @@ where
let para_finality_call = calls
.next()
.transpose()?
- .and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get()));
+ .and_then(|c| c.submit_parachain_heads_info_for(Para::BridgedChain::PARACHAIN_ID));
let relay_finality_call =
calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info());
@@ -711,7 +671,26 @@ where
Ok(call)
}
- fn additional_call_result_check(relayer: &Runtime::AccountId, call_info: &CallInfo) -> bool {
+ fn additional_call_result_check(
+ relayer: &Runtime::AccountId,
+ call_info: &CallInfo,
+ extra_weight: &mut Weight,
+ extra_size: &mut u32,
+ ) -> bool {
+ // check if relay chain state has been updated
+ let is_grandpa_call_successful =
+ RefundBridgedGrandpaMessages::<
+ Runtime,
+ Runtime::BridgesGrandpaPalletInstance,
+ Msgs,
+ Refund,
+ Priority,
+ Id,
+ >::additional_call_result_check(relayer, call_info, extra_weight, extra_size);
+ if !is_grandpa_call_successful {
+ return false
+ }
+
// check if parachain state has been updated
if let Some(para_proof_info) = call_info.submit_parachain_heads_info() {
if !SubmitParachainHeadsHelper::::was_successful(
@@ -722,7 +701,7 @@ where
target: "runtime::bridge",
"{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof",
Id::STR,
- Para::Id::get(),
+ Para::BridgedChain::PARACHAIN_ID,
Msgs::Id::get(),
relayer,
);
@@ -794,7 +773,6 @@ where
+ MessagesCallSubType,
{
type Runtime = Runtime;
- type GrandpaInstance = GrandpaInstance;
type Msgs = Msgs;
type Refund = Refund;
type Priority = Priority;
@@ -836,13 +814,125 @@ where
Ok(call)
}
- fn additional_call_result_check(_relayer: &Runtime::AccountId, _call_info: &CallInfo) -> bool {
+ fn additional_call_result_check(
+ relayer: &Runtime::AccountId,
+ call_info: &CallInfo,
+ extra_weight: &mut Weight,
+ extra_size: &mut u32,
+ ) -> bool {
+ // check if relay chain state has been updated
+ if let Some(finality_proof_info) = call_info.submit_finality_proof_info() {
+ if !SubmitFinalityProofHelper::::was_successful(
+ finality_proof_info.block_number,
+ ) {
+ // we only refund relayer if all calls have updated chain state
+ log::trace!(
+ target: "runtime::bridge",
+ "{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof",
+ Self::Id::STR,
+ ::Id::get(),
+ relayer,
+ );
+ return false
+ }
+
+ // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll`
+ // transaction. If relay chain header is mandatory, the GRANDPA pallet returns
+ // `Pays::No`, because such transaction is mandatory for operating the bridge. But
+ // `utility.batchAll` transaction always requires payment. But in both cases we'll
+ // refund relayer - either explicitly here, or using `Pays::No` if he's choosing
+ // to submit dedicated transaction.
+
+ // submitter has means to include extra weight/bytes in the `submit_finality_proof`
+ // call, so let's subtract extra weight/size to avoid refunding for this extra stuff
+ *extra_weight = (*extra_weight).saturating_add(finality_proof_info.extra_weight);
+ *extra_size = (*extra_size).saturating_add(finality_proof_info.extra_size);
+ }
+
+ true
+ }
+}
+
+/// Transaction extension that refunds a relayer for standalone messages delivery and confirmation
+/// transactions. Finality transactions are not refunded.
+#[derive(
+ DefaultNoBound,
+ CloneNoBound,
+ Decode,
+ Encode,
+ EqNoBound,
+ PartialEqNoBound,
+ RuntimeDebugNoBound,
+ TypeInfo,
+)]
+#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))]
+pub struct RefundBridgedMessages(
+ PhantomData<(
+ // runtime with `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed
+ Runtime,
+ // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of
+ // the used `pallet-bridge-messages` pallet and the lane within this pallet
+ Msgs,
+ // implementation of the `RefundCalculator` trait, that is used to compute refund that
+ // we give to relayer for his transaction
+ Refund,
+ // getter for per-message `TransactionPriority` boost that we give to message
+ // delivery transactions
+ Priority,
+ // the runtime-unique identifier of this signed extension
+ Id,
+ )>,
+);
+
+impl RefundSignedExtension
+ for RefundBridgedMessages
+where
+ Self: 'static + Send + Sync,
+ Runtime: MessagesConfig + RelayersConfig,
+ Msgs: RefundableMessagesLaneId,
+ Refund: RefundCalculator,
+ Priority: Get,
+ Id: StaticStrProvider,
+ CallOf: Dispatchable
+ + MessagesCallSubType,
+{
+ type Runtime = Runtime;
+ type Msgs = Msgs;
+ type Refund = Refund;
+ type Priority = Priority;
+ type Id = Id;
+
+ fn expand_call(call: &CallOf) -> Vec<&CallOf> {
+ vec![call]
+ }
+
+ fn parse_and_check_for_obsolete_call(
+ call: &CallOf,
+ ) -> Result