diff --git a/.config/lychee.toml b/.config/lychee.toml
index b1f08de33340999d52114ed8c6b5fcd6604662fa..58f8d068d9d13d270c19445fa6983c605b0febb4 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -28,7 +28,7 @@ exclude = [
 	"http://visitme/",
 	"https://visitme/",
 
-	# TODO <https://github.com/paritytech/polkadot-sdk/issues/134>
+	# TODO meta issue: <https://github.com/paritytech/polkadot-sdk/issues/134>
 	"https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs",
 	"https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html",
 	"https://github.com/ipfs/js-ipfs-bitswap/blob/",
@@ -50,8 +50,10 @@ exclude = [
 	"https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html",
 
 	# Behind a captcha (code 403):
+	"https://chainlist.org/chain/*",
 	"https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/",
 	"https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/",
+
 	# 403 rate limited:
 	"https://etherscan.io/block/11090290",
 	"https://subscan.io/",
diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index 780fa0012976681adf1d2f75480ccc4b50f4ae4b..9154f185e64b959967f236e989d59659c347e9f3 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -36,6 +36,21 @@ def from_pr_number(n, audience, bump, force):
 
 	create_prdoc(n, audience, pr.title, pr.body, patch, bump, force)
 
+def translate_audience(audience):
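+	# Translate the snake_case audience aliases accepted on the CLI into the
+	# human-readable audience names used in prdoc files.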
+	aliases = {
+		'runtime_dev': 'Runtime Dev',
+		'runtime_user': 'Runtime Operator',
+		'node_dev': 'Node Dev',
+		'node_user': 'Node User',
+	}
+
+	if audience in aliases:
+		to = aliases[audience]
+		print(f"Translated audience '{audience}' to '{to}'")
+		audience = to
+
+	return audience
+
 def create_prdoc(pr, audience, title, description, patch, bump, force):
 	path = f"prdoc/pr_{pr}.prdoc"
 
@@ -49,6 +64,7 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
 		print(f"No preexisting PrDoc for PR {pr}")
 
 	prdoc = { "title": title, "doc": [{}], "crates": [] }
+	audience = translate_audience(audience)
 
 	prdoc["doc"][0]["audience"] = audience
 	prdoc["doc"][0]["description"] = description
@@ -117,7 +133,7 @@ def setup_parser(parser=None, pr_required=True):
 		parser = argparse.ArgumentParser()
 	parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.")
 	parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev")
-	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "no_change"], help="A default bump level for all crates. Example: --bump patch")
+	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "none"], help="A default bump level for all crates. Example: --bump patch")
 	parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.")
 	return parser
 
diff --git a/.github/workflows/benchmarks-networking.yml b/.github/workflows/benchmarks-networking.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8f4246c79548169e9fab03cd61719f9e04f2cea1
--- /dev/null
+++ b/.github/workflows/benchmarks-networking.yml
@@ -0,0 +1,111 @@
+name: Networking Benchmarks
+
+on:
+  push:
+    branches:
+      - master
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  build:
+    timeout-minutes: 50
+    needs: [preflight]
+    runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    strategy:
+      fail-fast: false
+      matrix:
+        features:
+          [
+            { bench: "notifications_protocol" },
+            { bench: "request_response_protocol" },
+          ]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run Benchmarks
+        id: run-benchmarks
+        run: |
+          mkdir -p ./charts
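+          # Filter the bench output down to the bencher-format "test ..." lines and
+          # store them under ./charts for the publish-benchmarks job to pick up.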
+          forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/${{ matrix.features.bench }}.txt || echo "Benchmarks failed"
+          ls -lsa ./charts
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4.3.6
+        with:
+          name: ${{ matrix.features.bench }}-${{ github.sha }}
+          path: ./charts
+
+  publish-benchmarks:
+    timeout-minutes: 60
+    needs: [build]
+    if: github.ref == 'refs/heads/master'
+    environment: subsystem-benchmarks
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages
+          fetch-depth: 0
+
+      - run: git checkout master --
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4.1.8
+        with:
+          name: notifications_protocol-${{ github.sha }}
+          path: ./charts
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4.1.8
+        with:
+          name: request_response_protocol-${{ github.sha }}
+          path: ./charts
+
+      - name: Setup git
+        run: |
+          # Fixes the "detected dubious ownership" error in CI
+          git config --global --add safe.directory '*'
+          ls -lsR ./charts
+
+      - uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }}
+          private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }}
+
+      - name: Generate ${{ env.BENCH }}
+        env:
+          BENCH: notifications_protocol
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: "cargo"
+          name: ${{ env.BENCH }}
+          output-file-path: ./charts/${{ env.BENCH }}.txt
+          benchmark-data-dir-path: ./bench/${{ env.BENCH }}
+          github-token: ${{ steps.app-token.outputs.token }}
+          auto-push: true
+
+      - name: Generate ${{ env.BENCH }}
+        env:
+          BENCH: request_response_protocol
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: "cargo"
+          name: ${{ env.BENCH }}
+          output-file-path: ./charts/${{ env.BENCH }}.txt
+          benchmark-data-dir-path: ./bench/${{ env.BENCH }}
+          github-token: ${{ steps.app-token.outputs.token }}
+          auto-push: true
diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/benchmarks-subsystem.yml
similarity index 100%
rename from .github/workflows/subsystem-benchmarks.yml
rename to .github/workflows/benchmarks-subsystem.yml
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 16028c8de77003f0b51622b3bf18a61a4781a826..0da3e54ef60b88420a0bb9762bc9fff2e88be4af 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -2,7 +2,7 @@ name: Check semver
 
 on:
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
+    types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled]
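+    # `labeled` / `unlabeled` re-trigger the check when PR labels (e.g. `R0-silent`) change.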
   workflow_dispatch:
   merge_group:
 
@@ -62,21 +62,29 @@ jobs:
 
           echo "PRDOC_EXTRA_ARGS=--max-bump minor" >> $GITHUB_ENV
 
+      - name: Echo Skip
+        if: ${{ contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
+        run: echo "Skipping this PR because it is labeled as R0-silent."
+
       - name: Rust Cache
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
 
       - name: Rust compilation prerequisites
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           rustup default $TOOLCHAIN
           rustup component add rust-src --toolchain $TOOLCHAIN
 
       - name: install parity-publish
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         # Set the target dir to cache the build.
         run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q
 
       - name: check semver
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           if [ -z "$PR" ]; then
             echo "Skipping master/merge queue"
diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml
index 7022e8e0e0067cb2dee68ef4c009d59b22ab9360..71dbcfbd22893ac0cd94387e7311eae471751dbf 100644
--- a/.github/workflows/command-prdoc.yml
+++ b/.github/workflows/command-prdoc.yml
@@ -14,7 +14,7 @@ on:
         required: true
         options:
           - "TODO"
-          - "no_change"
+          - "none"
           - "patch"
           - "minor"
           - "major"
diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml
index 7ff0705fe249aa9899add3baad1b148091b432ff..8d06d89621d78ec811b42ce71ae7e5ec07d9a0e8 100644
--- a/.github/workflows/misc-sync-templates.yml
+++ b/.github/workflows/misc-sync-templates.yml
@@ -21,6 +21,10 @@ on:
       stable_release_branch:
         description: 'Stable release branch, e.g. stable2407'
         required: true
+      debug:
+        description: Enable runner debug logging
+        required: false
+        default: false
 
 jobs:
   sync-templates:
@@ -86,7 +90,7 @@ jobs:
           EOF
 
           [ ${{ matrix.template }} != "solochain" ] && echo "# Leave out the node compilation from regular template usage." \
-            && echo "\"default-members\" = [\"pallets/template\", \"runtime\"]" >> Cargo.toml
+            && echo "default-members = [\"pallets/template\", \"runtime\"]" >> Cargo.toml
           [ ${{ matrix.template }} == "solochain" ] && echo "# The node isn't yet replaceable by Omni Node."
           cat << EOF >> Cargo.toml
           members = [
@@ -115,8 +119,9 @@ jobs:
           toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.edition' "$(toml get --raw Cargo.toml 'workspace.package.edition')" > Cargo.temp
           mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml
         working-directory: polkadot-sdk
+
       - name: Print the result Cargo.tomls for debugging
-        if: runner.debug == '1'
+        if: ${{ github.event.inputs.debug }}
         run: find . -type f -name 'Cargo.toml' -exec cat {} \;
         working-directory: polkadot-sdk/templates/${{ matrix.template }}/
 
@@ -142,6 +147,12 @@ jobs:
           done;
         working-directory: "${{ env.template-path }}"
 
+      - name: Print the result Cargo.tomls for debugging after copying required workspace dependencies
+        if: ${{ github.event.inputs.debug }}
+        run: find . -type f -name 'Cargo.toml' -exec cat {} \;
+        working-directory: polkadot-sdk/templates/${{ matrix.template }}/
+
       # 3. Verify the build. Push the changes or create a PR.
 
       # We've run into out-of-disk error when compiling in the next step, so we free up some space this way.
diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index 0222b2aa91e2171efa4d13032299b427891018b5..035b547603e1fb6246e32a3c295113a6135cc0bc 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -149,7 +149,6 @@ jobs:
       AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
-      SKIP_WASM_BUILD: 1
     steps:
       - name: Checkout sources
         uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
diff --git a/.github/workflows/reusable-preflight.yml b/.github/workflows/reusable-preflight.yml
index e1799adddcaf6435601894876c25034625827577..8487ab107d7c2ff5b080b39a209e04cc718ba1f9 100644
--- a/.github/workflows/reusable-preflight.yml
+++ b/.github/workflows/reusable-preflight.yml
@@ -203,6 +203,7 @@ jobs:
           echo $( substrate-contracts-node --version | awk 'NF' )
           estuary --version
           cargo-contract --version
+          taplo --version
 
       - name: Info forklift
         run: forklift version
diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json
index 104e73521331e95f4cc3c9c0b6734fcc28199a5f..ff16b7397247fdfaab8d6e95d9ab96da4a6c2fbe 100644
--- a/.github/workflows/runtimes-matrix.json
+++ b/.github/workflows/runtimes-matrix.json
@@ -145,7 +145,7 @@
   {
     "name": "glutton-westend",
     "package": "glutton-westend-runtime",
-    "path": "cumulus/parachains/runtimes/gluttons/glutton-westend",
+    "path": "cumulus/parachains/runtimes/glutton/glutton-westend",
     "header": "cumulus/file_header.txt",
     "template": "cumulus/templates/xcm-bench-template.hbs",
     "bench_features": "runtime-benchmarks",
diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 14a235bcda8604dfee771d49ffb22163510df73b..878f241317a42a102e08e7267a9b5d09c1fc1771 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -160,39 +160,6 @@ zombienet-polkadot-functional-0010-validator-disabling:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0010-validator-disabling.zndsl"
 
-.zombienet-polkadot-functional-0011-async-backing-6-seconds-rate:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/functional"
-      --test="0011-async-backing-6-seconds-rate.zndsl"
-
-zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks:
-  extends:
-    - .zombienet-polkadot-common
-  variables:
-    FORCED_INFRA_INSTANCE: "spot-iops"
-  before_script:
-    - !reference [ .zombienet-polkadot-common, before_script ]
-    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/elastic_scaling"
-      --test="0001-basic-3cores-6s-blocks.zndsl"
-
-.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
-  extends:
-    - .zombienet-polkadot-common
-  before_script:
-    - !reference [ .zombienet-polkadot-common, before_script ]
-    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/elastic_scaling"
-      --test="0002-elastic-scaling-doesnt-break-parachains.zndsl"
-
-
 .zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
   extends:
     - .zombienet-polkadot-common
@@ -236,14 +203,6 @@ zombienet-polkadot-functional-0015-coretime-shared-core:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0016-approval-voting-parallel.zndsl"
 
-.zombienet-polkadot-functional-0017-sync-backing:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/functional"
-      --test="0017-sync-backing.zndsl"
-
 zombienet-polkadot-functional-0018-shared-core-idle-parachain:
   extends:
     - .zombienet-polkadot-common
@@ -386,6 +345,8 @@ zombienet-polkadot-malus-0001-dispute-valid:
       --local-dir="${LOCAL_DIR}/integrationtests"
       --test="0001-dispute-valid-block.zndsl"
 
+# SDK tests
+
 .zombienet-polkadot-coretime-revenue:
   extends:
     - .zombienet-polkadot-common
@@ -411,8 +372,78 @@ zombienet-polkadot-elastic-scaling-slot-based-3cores:
     - !reference [ ".zombienet-polkadot-common", "before_script" ]
     - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
     - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
   script:
     # we want to use `--no-capture` in zombienet tests.
     - unset NEXTEST_FAILURE_OUTPUT
     - unset NEXTEST_SUCCESS_OUTPUT
     - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::slot_based_3cores::slot_based_3cores_test
+
+zombienet-polkadot-elastic-scaling-doesnt-break-parachains:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  variables:
+    KUBERNETES_CPU_REQUEST: "1"
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - RUST_LOG=info,zombienet_=trace cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::doesnt_break_parachains::doesnt_break_parachains_test
+
+zombienet-polkadot-elastic-scaling-basic-3cores:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export CUMULUS_IMAGE="${COL_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::basic_3cores::basic_3cores_test
+
+zombienet-polkadot-functional-sync-backing:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    # Hardcoded to an old polkadot-parachain image, pre async backing.
+    - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:master-99623e62"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::sync_backing::sync_backing_test
+
+zombienet-polkadot-functional-async-backing-6-seconds-rate:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::async_backing_6_seconds_rate::async_backing_6_seconds_rate_test
diff --git a/Cargo.lock b/Cargo.lock
index 4fd911ffbcd0fabd20025a80251e0ab306150b7b..c651b9d00b6b477e35de88eb86a810ae643a8da7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -961,11 +961,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -1099,11 +1099,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -2671,11 +2671,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -2910,11 +2910,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -3650,11 +3650,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -3957,11 +3957,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -4100,11 +4100,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -4201,11 +4201,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -5079,6 +5079,25 @@ dependencies = [
  "sp-runtime 39.0.2",
 ]
 
+[[package]]
+name = "cumulus-pallet-weight-reclaim"
+version = "1.0.0"
+dependencies = [
+ "cumulus-primitives-proof-size-hostfunction 0.2.0",
+ "cumulus-primitives-storage-weight-reclaim 1.0.0",
+ "derivative",
+ "docify",
+ "frame-benchmarking 28.0.0",
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io 30.0.0",
+ "sp-runtime 31.0.1",
+ "sp-trie 29.0.0",
+]
+
 [[package]]
 name = "cumulus-pallet-xcm"
 version = "0.7.0"
@@ -5529,10 +5548,10 @@ dependencies = [
 name = "cumulus-test-client"
 version = "0.1.0"
 dependencies = [
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-core 0.7.0",
  "cumulus-primitives-parachain-inherent 0.7.0",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-test-relay-sproof-builder 0.7.0",
  "cumulus-test-runtime",
  "cumulus-test-service",
@@ -5594,9 +5613,9 @@ version = "0.1.0"
 dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "frame-executive 28.0.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
@@ -5648,8 +5667,8 @@ dependencies = [
  "cumulus-client-pov-recovery",
  "cumulus-client-service",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-relay-chain-inprocess-interface",
  "cumulus-relay-chain-interface",
  "cumulus-relay-chain-minimal-node",
@@ -6199,7 +6218,7 @@ dependencies = [
  "regex",
  "syn 2.0.87",
  "termcolor",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
 ]
 
@@ -7702,7 +7721,6 @@ dependencies = [
  "sp-externalities 0.25.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-version 29.0.0",
  "sp-weights 27.0.0",
  "substrate-test-runtime-client",
@@ -9764,29 +9782,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "libp2p"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464"
-dependencies = [
- "bytes",
- "either",
- "futures",
- "futures-timer",
- "getrandom",
- "instant",
- "libp2p-allow-block-list 0.2.0",
- "libp2p-connection-limits 0.2.1",
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
- "multiaddr 0.18.1",
- "pin-project",
- "rw-stream-sink",
- "thiserror",
-]
-
 [[package]]
 name = "libp2p"
 version = "0.54.1"
@@ -9798,9 +9793,9 @@ dependencies = [
  "futures",
  "futures-timer",
  "getrandom",
- "libp2p-allow-block-list 0.4.0",
- "libp2p-connection-limits 0.4.0",
- "libp2p-core 0.42.0",
+ "libp2p-allow-block-list",
+ "libp2p-connection-limits",
+ "libp2p-core",
  "libp2p-dns",
  "libp2p-identify",
  "libp2p-identity",
@@ -9811,7 +9806,7 @@ dependencies = [
  "libp2p-ping",
  "libp2p-quic",
  "libp2p-request-response",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "libp2p-tcp",
  "libp2p-upnp",
  "libp2p-websocket",
@@ -9822,39 +9817,15 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "libp2p-allow-block-list"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311"
-dependencies = [
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
- "void",
-]
-
 [[package]]
 name = "libp2p-allow-block-list"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041"
 dependencies = [
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
- "void",
-]
-
-[[package]]
-name = "libp2p-connection-limits"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58"
-dependencies = [
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
+ "libp2p-swarm",
  "void",
 ]
 
@@ -9864,37 +9835,9 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8"
 dependencies = [
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
- "void",
-]
-
-[[package]]
-name = "libp2p-core"
-version = "0.40.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713"
-dependencies = [
- "either",
- "fnv",
- "futures",
- "futures-timer",
- "instant",
- "libp2p-identity",
- "log",
- "multiaddr 0.18.1",
- "multihash 0.19.1",
- "multistream-select",
- "once_cell",
- "parking_lot 0.12.3",
- "pin-project",
- "quick-protobuf 0.8.1",
- "rand",
- "rw-stream-sink",
- "smallvec",
- "thiserror",
- "unsigned-varint 0.7.2",
+ "libp2p-swarm",
  "void",
 ]
 
@@ -9935,7 +9878,7 @@ dependencies = [
  "async-trait",
  "futures",
  "hickory-resolver",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "parking_lot 0.12.3",
  "smallvec",
@@ -9953,9 +9896,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "lru 0.12.3",
  "quick-protobuf 0.8.1",
  "quick-protobuf-codec",
@@ -9997,9 +9940,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "quick-protobuf 0.8.1",
  "quick-protobuf-codec",
  "rand",
@@ -10022,9 +9965,9 @@ dependencies = [
  "futures",
  "hickory-proto",
  "if-watch",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "smallvec",
  "socket2 0.5.7",
@@ -10040,12 +9983,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566"
 dependencies = [
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identify",
  "libp2p-identity",
  "libp2p-kad",
  "libp2p-ping",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "pin-project",
  "prometheus-client",
  "web-time",
@@ -10061,7 +10004,7 @@ dependencies = [
  "bytes",
  "curve25519-dalek 4.1.3",
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "multiaddr 0.18.1",
  "multihash 0.19.1",
@@ -10086,9 +10029,9 @@ dependencies = [
  "either",
  "futures",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "tracing",
  "void",
@@ -10105,7 +10048,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "if-watch",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "libp2p-tls",
  "parking_lot 0.12.3",
@@ -10129,9 +10072,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "smallvec",
  "tracing",
@@ -10139,27 +10082,6 @@ dependencies = [
  "web-time",
 ]
 
-[[package]]
-name = "libp2p-swarm"
-version = "0.43.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "580189e0074af847df90e75ef54f3f30059aedda37ea5a1659e8b9fca05c0141"
-dependencies = [
- "either",
- "fnv",
- "futures",
- "futures-timer",
- "instant",
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "log",
- "multistream-select",
- "once_cell",
- "rand",
- "smallvec",
- "void",
-]
-
 [[package]]
 name = "libp2p-swarm"
 version = "0.45.1"
@@ -10170,7 +10092,7 @@ dependencies = [
  "fnv",
  "futures",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "libp2p-swarm-derive",
  "lru 0.12.3",
@@ -10206,7 +10128,7 @@ dependencies = [
  "futures-timer",
  "if-watch",
  "libc",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "socket2 0.5.7",
  "tokio",
@@ -10221,7 +10143,7 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847"
 dependencies = [
  "futures",
  "futures-rustls",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "rcgen 0.11.3",
  "ring 0.17.8",
@@ -10241,8 +10163,8 @@ dependencies = [
  "futures",
  "futures-timer",
  "igd-next",
- "libp2p-core 0.42.0",
- "libp2p-swarm 0.45.1",
+ "libp2p-core",
+ "libp2p-swarm",
  "tokio",
  "tracing",
  "void",
@@ -10257,7 +10179,7 @@ dependencies = [
  "either",
  "futures",
  "futures-rustls",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "parking_lot 0.12.3",
  "pin-project-lite",
@@ -10277,7 +10199,7 @@ checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882"
 dependencies = [
  "either",
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "thiserror",
  "tracing",
  "yamux 0.12.1",
@@ -11287,17 +11209,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "nix"
-version = "0.27.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
-dependencies = [
- "bitflags 2.6.0",
- "cfg-if",
- "libc",
-]
-
 [[package]]
 name = "nix"
 version = "0.29.0"
@@ -12378,7 +12289,6 @@ dependencies = [
  "pallet-staking 28.0.0",
  "sp-core 28.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-storage 19.0.0",
  "sp-tracing 16.0.0",
 ]
@@ -12974,7 +12884,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-keystore 0.34.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
  "staging-xcm 7.0.0",
  "staging-xcm-builder 7.0.0",
@@ -13025,7 +12934,7 @@ dependencies = [
  "parity-wasm",
  "sp-runtime 31.0.1",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "twox-hash",
 ]
 
@@ -13359,7 +13268,6 @@ dependencies = [
  "sp-npos-elections 26.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14055,18 +13963,13 @@ dependencies = [
 name = "pallet-mixnet"
 version = "0.4.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
  "serde",
  "sp-application-crypto 30.0.0",
- "sp-arithmetic 23.0.0",
- "sp-io 30.0.0",
  "sp-mixnet 0.4.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
@@ -14169,7 +14072,6 @@ dependencies = [
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -14281,14 +14183,10 @@ dependencies = [
 name = "pallet-node-authorization"
 version = "28.0.0"
 dependencies = [
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
@@ -14444,7 +14342,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14468,7 +14365,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14882,7 +14778,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-keystore 0.34.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
  "staging-xcm 7.0.0",
  "staging-xcm-builder 7.0.0",
@@ -14960,7 +14855,7 @@ dependencies = [
  "polkavm-linker 0.18.0",
  "sp-core 28.0.0",
  "sp-io 30.0.0",
- "toml 0.8.12",
+ "toml 0.8.19",
 ]
 
 [[package]]
@@ -14975,7 +14870,7 @@ dependencies = [
  "polkavm-linker 0.10.0",
  "sp-runtime 39.0.2",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
 ]
 
 [[package]]
@@ -15107,7 +15002,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -15196,17 +15090,11 @@ dependencies = [
 name = "pallet-salary"
 version = "13.0.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "pallet-ranked-collective 28.0.0",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-arithmetic 23.0.0",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
@@ -15953,7 +15841,6 @@ dependencies = [
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -16773,11 +16660,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "enumflags2",
  "frame-benchmarking 28.0.0",
@@ -16876,11 +16763,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "enumflags2",
  "frame-benchmarking 28.0.0",
@@ -18676,6 +18563,7 @@ dependencies = [
  "cumulus-pallet-parachain-system-proc-macro 0.6.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
  "cumulus-pallet-solo-to-para 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-ping 0.7.0",
@@ -19265,8 +19153,8 @@ dependencies = [
  "cumulus-client-service",
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "docify",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -19341,6 +19229,7 @@ dependencies = [
  "sp-runtime 31.0.1",
  "sp-runtime-interface 24.0.0",
  "sp-std 14.0.0",
+ "sp-storage 19.0.0",
  "sp-tracing 16.0.0",
  "sp-version 29.0.0",
  "sp-weights 27.0.0",
@@ -19880,6 +19769,7 @@ dependencies = [
  "env_logger 0.11.3",
  "log",
  "parity-scale-codec",
+ "polkadot-primitives 7.0.0",
  "serde",
  "serde_json",
  "substrate-build-script-utils",
@@ -19956,12 +19846,6 @@ dependencies = [
  "log",
 ]
 
-[[package]]
-name = "polkavm-common"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92c99f7eee94e7be43ba37eef65ad0ee8cbaf89b7c00001c3f6d2be985cb1817"
-
 [[package]]
 name = "polkavm-common"
 version = "0.9.0"
@@ -19991,15 +19875,6 @@ dependencies = [
  "polkavm-assembler 0.18.0",
 ]
 
-[[package]]
-name = "polkavm-derive"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79fa916f7962348bd1bb1a65a83401675e6fc86c51a0fdbcf92a3108e58e6125"
-dependencies = [
- "polkavm-derive-impl-macro 0.8.0",
-]
-
 [[package]]
 name = "polkavm-derive"
 version = "0.9.1"
@@ -20027,18 +19902,6 @@ dependencies = [
  "polkavm-derive-impl-macro 0.18.0",
 ]
 
-[[package]]
-name = "polkavm-derive-impl"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c10b2654a8a10a83c260bfb93e97b262cf0017494ab94a65d389e0eda6de6c9c"
-dependencies = [
- "polkavm-common 0.8.0",
- "proc-macro2 1.0.86",
- "quote 1.0.37",
- "syn 2.0.87",
-]
-
 [[package]]
 name = "polkavm-derive-impl"
 version = "0.9.0"
@@ -20075,16 +19938,6 @@ dependencies = [
  "syn 2.0.87",
 ]
 
-[[package]]
-name = "polkavm-derive-impl-macro"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66"
-dependencies = [
- "polkavm-derive-impl 0.8.0",
- "syn 2.0.87",
-]
-
 [[package]]
 name = "polkavm-derive-impl-macro"
 version = "0.9.0"
@@ -21479,12 +21332,12 @@ version = "0.6.0"
 dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-ping 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -23068,7 +22921,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "ip_network",
- "libp2p 0.54.1",
+ "libp2p",
  "linked_hash_set",
  "litep2p",
  "log",
@@ -23244,7 +23097,7 @@ dependencies = [
  "async-trait",
  "futures",
  "futures-timer",
- "libp2p 0.54.1",
+ "libp2p",
  "log",
  "parking_lot 0.12.3",
  "rand",
@@ -23693,7 +23546,6 @@ dependencies = [
  "sp-crypto-hashing 0.1.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -23702,7 +23554,7 @@ version = "15.0.0"
 dependencies = [
  "chrono",
  "futures",
- "libp2p 0.54.1",
+ "libp2p",
  "log",
  "parking_lot 0.12.3",
  "pin-project",
@@ -26204,53 +26056,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "sp-core"
-version = "31.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26d7a0fd8f16dcc3761198fc83be12872f823b37b749bc72a3a6a1f702509366"
-dependencies = [
- "array-bytes",
- "bitflags 1.3.2",
- "blake2 0.10.6",
- "bounded-collections",
- "bs58",
- "dyn-clonable",
- "ed25519-zebra 3.1.0",
- "futures",
- "hash-db",
- "hash256-std-hasher",
- "impl-serde 0.4.0",
- "itertools 0.10.5",
- "k256",
- "libsecp256k1",
- "log",
- "merlin",
- "parity-bip39",
- "parity-scale-codec",
- "parking_lot 0.12.3",
- "paste",
- "primitive-types 0.12.2",
- "rand",
- "scale-info",
- "schnorrkel 0.11.4",
- "secp256k1 0.28.2",
- "secrecy 0.8.0",
- "serde",
- "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-externalities 0.27.0",
- "sp-runtime-interface 26.0.0",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
- "ss58-registry",
- "substrate-bip39 0.5.0",
- "thiserror",
- "tracing",
- "w3f-bls",
- "zeroize",
-]
-
 [[package]]
 name = "sp-core"
 version = "32.0.0"
@@ -26591,18 +26396,6 @@ dependencies = [
  "sp-storage 19.0.0",
 ]
 
-[[package]]
-name = "sp-externalities"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d6a4572eadd4a63cff92509a210bf425501a0c5e76574b30a366ac77653787"
-dependencies = [
- "environmental",
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
-]
-
 [[package]]
 name = "sp-externalities"
 version = "0.28.0"
@@ -27187,26 +26980,6 @@ dependencies = [
  "trybuild",
 ]
 
-[[package]]
-name = "sp-runtime-interface"
-version = "26.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e48a675ea4858333d4d755899ed5ed780174aa34fec15953428d516af5452295"
-dependencies = [
- "bytes",
- "impl-trait-for-tuples",
- "parity-scale-codec",
- "polkavm-derive 0.8.0",
- "primitive-types 0.12.2",
- "sp-externalities 0.27.0",
- "sp-runtime-interface-proc-macro 18.0.0",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
- "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-wasm-interface 20.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "static_assertions",
-]
-
 [[package]]
 name = "sp-runtime-interface"
 version = "27.0.0"
@@ -27564,20 +27337,6 @@ dependencies = [
  "sp-debug-derive 14.0.0",
 ]
 
-[[package]]
-name = "sp-storage"
-version = "20.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8dba5791cb3978e95daf99dad919ecb3ec35565604e88cd38d805d9d4981e8bd"
-dependencies = [
- "impl-serde 0.4.0",
- "parity-scale-codec",
- "ref-cast",
- "serde",
- "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "sp-storage"
 version = "21.0.0"
@@ -27649,19 +27408,6 @@ dependencies = [
  "tracing-subscriber 0.3.18",
 ]
 
-[[package]]
-name = "sp-tracing"
-version = "16.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0351810b9d074df71c4514c5228ed05c250607cba131c1c9d1526760ab69c05c"
-dependencies = [
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tracing",
- "tracing-core",
- "tracing-subscriber 0.2.25",
-]
-
 [[package]]
 name = "sp-tracing"
 version = "17.0.1"
@@ -27918,20 +27664,6 @@ dependencies = [
  "wasmtime",
 ]
 
-[[package]]
-name = "sp-wasm-interface"
-version = "20.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ef97172c42eb4c6c26506f325f48463e9bc29b2034a587f1b9e48c751229bee"
-dependencies = [
- "anyhow",
- "impl-trait-for-tuples",
- "log",
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmtime",
-]
-
 [[package]]
 name = "sp-wasm-interface"
 version = "21.0.1"
@@ -28471,19 +28203,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "substrate-bip39"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2b564c293e6194e8b222e52436bcb99f60de72043c7f845cf6c4406db4df121"
-dependencies = [
- "hmac 0.12.1",
- "pbkdf2",
- "schnorrkel 0.11.4",
- "sha2 0.10.8",
- "zeroize",
-]
-
 [[package]]
 name = "substrate-bip39"
 version = "0.6.0"
@@ -28824,7 +28543,7 @@ dependencies = [
  "sp-version 29.0.0",
  "strum 0.26.3",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
  "wasm-opt",
 ]
@@ -28845,7 +28564,7 @@ dependencies = [
  "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "strum 0.26.3",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
  "wasm-opt",
 ]
@@ -29842,33 +29561,21 @@ dependencies = [
 
 [[package]]
 name = "toml"
-version = "0.7.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit 0.19.15",
-]
-
-[[package]]
-name = "toml"
-version = "0.8.12"
+version = "0.8.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
+checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
 dependencies = [
  "serde",
  "serde_spanned",
  "toml_datetime",
- "toml_edit 0.22.12",
+ "toml_edit 0.22.22",
 ]
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
 dependencies = [
  "serde",
 ]
@@ -29880,8 +29587,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
 dependencies = [
  "indexmap 2.7.0",
- "serde",
- "serde_spanned",
  "toml_datetime",
  "winnow 0.5.15",
 ]
@@ -29899,9 +29604,9 @@ dependencies = [
 
 [[package]]
 name = "toml_edit"
-version = "0.22.12"
+version = "0.22.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
+checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
 dependencies = [
  "indexmap 2.7.0",
  "serde",
@@ -32103,9 +31808,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-configuration"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d716b3ff8112d98ced15f53b0c72454f8cde533fe2b68bb04379228961efbd80"
+checksum = "5ced2fca1322821431f03d06dcf2ea74d3a7369760b6c587b372de6eada3ce43"
 dependencies = [
  "anyhow",
  "lazy_static",
@@ -32116,23 +31821,23 @@ dependencies = [
  "serde_json",
  "thiserror",
  "tokio",
- "toml 0.7.8",
+ "toml 0.8.19",
  "url",
  "zombienet-support",
 ]
 
 [[package]]
 name = "zombienet-orchestrator"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4098a7d33b729b59e32c41a87aa4d484bd1b8771a059bbd4edfb4d430b3b2d74"
+checksum = "86ecd17133c3129547b6472591b5e58d4aee1fc63c965a3418fd56d33a8a4e82"
 dependencies = [
  "anyhow",
  "async-trait",
  "futures",
  "glob-match",
  "hex",
- "libp2p 0.52.4",
+ "libp2p",
  "libsecp256k1",
  "multiaddr 0.18.1",
  "rand",
@@ -32141,7 +31846,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sha2 0.10.8",
- "sp-core 31.0.0",
+ "sp-core 34.0.0",
  "subxt",
  "subxt-signer",
  "thiserror",
@@ -32156,9 +31861,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-prom-metrics-parser"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "961e30be45b34f6ebeabf29ee2f47b0cd191ea62e40c064752572207509a6f5c"
+checksum = "23702db0819a050c8a0130a769b105695137020a64207b4597aa021f06924552"
 dependencies = [
  "pest",
  "pest_derive",
@@ -32167,9 +31872,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-provider"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab0f7f01780b7c99a6c40539d195d979f234305f32808d547438b50829d44262"
+checksum = "83e903843c62cd811e7730ccc618dcd14444d20e8aadfcd7d7561c7b47d8f984"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -32178,7 +31883,7 @@ dependencies = [
  "hex",
  "k8s-openapi",
  "kube",
- "nix 0.27.1",
+ "nix 0.29.0",
  "regex",
  "reqwest 0.11.27",
  "serde",
@@ -32198,9 +31903,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-sdk"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99a3c5f2d657235b3ab7dc384677e63cde21983029e99106766ecd49e9f8d7f3"
+checksum = "e457b12c8fdc7003c12dd56855da09812ac11dd232e4ec01acccb2899fe05e44"
 dependencies = [
  "async-trait",
  "futures",
@@ -32216,14 +31921,14 @@ dependencies = [
 
 [[package]]
 name = "zombienet-support"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "296f887ea88e07edd771f8e1d0dec5297a58b422f4b884a6292a21ebe03277cb"
+checksum = "43547d65b19a92cf0ee44380239d82ef345e7d26f7b04b9e0ecf48496af6346b"
 dependencies = [
  "anyhow",
  "async-trait",
  "futures",
- "nix 0.27.1",
+ "nix 0.29.0",
  "rand",
  "regex",
  "reqwest 0.11.27",
diff --git a/Cargo.toml b/Cargo.toml
index 008df04ad2a24350b0e2e89e9fdfde644eb39132..46666a942e4453e2ae68eabf2c9998eb08cb9152 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -83,6 +83,7 @@ members = [
 	"cumulus/pallets/parachain-system/proc-macro",
 	"cumulus/pallets/session-benchmarking",
 	"cumulus/pallets/solo-to-para",
+	"cumulus/pallets/weight-reclaim",
 	"cumulus/pallets/xcm",
 	"cumulus/pallets/xcmp-queue",
 	"cumulus/parachains/common",
@@ -718,6 +719,7 @@ cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", d
 cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false }
 cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false }
 cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false }
+cumulus-pallet-weight-reclaim = { path = "cumulus/pallets/weight-reclaim", default-features = false }
 cumulus-pallet-xcm = { path = "cumulus/pallets/xcm", default-features = false }
 cumulus-pallet-xcmp-queue = { path = "cumulus/pallets/xcmp-queue", default-features = false }
 cumulus-ping = { path = "cumulus/parachains/pallets/ping", default-features = false }
@@ -1391,7 +1393,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false }
 xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false }
 xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false }
 zeroize = { version = "1.7.0", default-features = false }
-zombienet-sdk = { version = "0.2.19" }
+zombienet-sdk = { version = "0.2.20" }
 zstd = { version = "0.12.4", default-features = false }
 
 [profile.release]
diff --git a/README.md b/README.md
index 6c0dfbb2e7e4255efb7bd925789bbc6d5fd7862a..24352cc28a1a98bed2719583ecc0d2a37ae12399 100644
--- a/README.md
+++ b/README.md
@@ -40,9 +40,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec
 <!-- markdownlint-disable-next-line MD013 -->
 ![Current Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-latest.svg)&nbsp;&nbsp;![Next Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-next.svg)
 
-The Polkadot SDK is released every three months as a `stableYYMMDD` release. They are supported for
+The Polkadot SDK is released every three months as a `stableYYMM` release. Each release is supported for
 one year with patches. See the next upcoming versions in the [Release
-Registry](https://github.com/paritytech/release-registry/).
+Registry](https://github.com/paritytech/release-registry/), with further documentation in [RELEASE.md](./docs/RELEASE.md).
 
 You can use [`psvm`](https://github.com/paritytech/psvm) to update all dependencies to a specific
 version without needing to manually select the correct version for each crate.
diff --git a/bridges/SECURITY.md b/bridges/SECURITY.md
index 9f215c88765474e6b211882296c8cf190f216780..ea19eca42cc35c844ef38ded1820a5accaaac827 100644
--- a/bridges/SECURITY.md
+++ b/bridges/SECURITY.md
@@ -13,6 +13,6 @@ If you think that your report might be eligible for the Bug Bounty Program, plea
 Please check up-to-date [Parity Bug Bounty Program rules](https://www.parity.io/bug-bounty) to find out the information
 about our Bug Bounty Program.
 
-**Warning**: This is an unified SECURITY.md file for Paritytech GitHub Organization. The presence of this file does not
+**Warning**: This is a unified SECURITY.md file for Paritytech GitHub Organization. The presence of this file does not
 mean that this repository is covered by the Bug Bounty program. Please always check the Bug Bounty Program scope for
 information.
diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md
index a78c8680249851b86fe09c42d66d36914ca8dd80..7d9a23b4ba1457c19e41543ff53031c686c66eeb 100644
--- a/bridges/modules/messages/README.md
+++ b/bridges/modules/messages/README.md
@@ -13,7 +13,7 @@ module and the final goal is to hand message to the message dispatch mechanism.
 
 ## Overview
 
-Message lane is an unidirectional channel, where messages are sent from source chain to the target chain. At the same
+A message lane is a unidirectional channel where messages are sent from the source chain to the target chain. At the same
 time, a single instance of messages module supports both outbound lanes and inbound lanes. So the chain where the module
 is deployed (this chain), may act as a source chain for outbound messages (heading to a bridged chain) and as a target
 chain for inbound messages (coming from a bridged chain).
diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
index 00adcdfa186adc755273b9753d833a6bdbec5b56..cb4232376c6fc98c21485aee7e3f5980cf8f941d 100644
--- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
@@ -3,5 +3,6 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 pub mod register_token;
+pub mod send_native_eth;
 pub mod send_token;
 pub mod send_token_to_penpal;
diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs
new file mode 100755
index 0000000000000000000000000000000000000000..d3e8d76e6b395081869b85e4b5d1541665a1259c
--- /dev/null
+++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork <hello@snowfork.com>
+// Generated, do not edit!
+// See ethereum client README.md for instructions to generate
+
+use hex_literal::hex;
+use snowbridge_beacon_primitives::{
+	types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader,
+};
+use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof};
+use sp_core::U256;
+use sp_std::vec;
+
+pub fn make_send_native_eth_message() -> InboundQueueFixture {
+	InboundQueueFixture {
+        message: Message {
+            event_log: 	Log {
+                address: hex!("87d1f7fdfee7f651fabc8bfcb6e086c278b77a7d").into(),
+                topics: vec![
+                    hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(),
+                    hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(),
+                    hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(),
+                ],
+                data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").into(),
+            },
+            proof: Proof {
+                receipt_proof: (vec![
+                    hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").to_vec(),
+                ], vec![
+                    hex!("f903b4822080b903ae02f903aa018301a7fcb9010000000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000f9029ff9015d9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8c000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").to_vec(),
+                ]),
+                execution_proof: ExecutionProof {
+                    header: BeaconHeader {
+                        slot: 246,
+                        proposer_index: 7,
+                        parent_root: hex!("4faaac5d2fa0b8884fe1175c7cac1c92aac9eba5a20b4302edb98a56428c5974").into(),
+                        state_root: hex!("882c13f1d56df781e3444a78cae565bfa1c89822c86cdb0daea71f5351231580").into(),
+                        body_root: hex!("c47eb72204b1ca567396dacef8b0214027eb7f0789330b55166085d1f9cb4c65").into(),
+                    },
+                        ancestry_proof: Some(AncestryProof {
+                        header_branch: vec![
+                            hex!("38e2454bc93c4cfafcea772b8531e4802bbd2561366620699096dd4e591bc488").into(),
+                            hex!("3d7389fb144ccaeca8b8e1667ce1d1538dfceb50bf1e49c4b368a223f051fda3").into(),
+                            hex!("0d49c9c24137ad4d86ebca2f36a159573a68b5d5d60e317776c77cc8b6093034").into(),
+                            hex!("0fadc6735bcdc2793a5039a806fbf39984c39374ed4d272c1147e1c23df88983").into(),
+                            hex!("3a058ad4b169eebb4c754c8488d41e56a7a0e5f8b55b5ec67452a8d326585c69").into(),
+                            hex!("de200426caa9bc03f8e0033b4ef4df1db6501924b5c10fb7867e76db942b903c").into(),
+                            hex!("48b578632bc40eebb517501f179ffdd06d762c03e9383df16fc651eeddd18806").into(),
+                            hex!("98d9d6904b2a6a285db4c4ae59a07100cd38ec4d9fb7a16a10fe83ec99e6ba1d").into(),
+                            hex!("1b2bbae6e684864b714654a60778664e63ba6c3c9bed8074ec1a0380fe5042e6").into(),
+                            hex!("eb907a888eadf5a7e2bd0a3a5a9369e409c7aa688bd4cde758d5b608c6c82785").into(),
+                            hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(),
+                            hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(),
+                            hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(),
+                        ],
+                        finalized_block_root: hex!("440615588532ce496a93d189cb0ef1df7cf67d529faee0fd03213ce26ea115e5").into(),
+                        }),
+                    execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader {
+                        parent_hash: hex!("a8c89213b7d7d2ac76462d89e6a7384374db905b657ad803d3c86f88f86c39df").into(),
+                        fee_recipient: hex!("0000000000000000000000000000000000000000").into(),
+                        state_root: hex!("a1e8175213a6a43da17fae65109245867cbc60e3ada16b8ac28c6b208761c772").into(),
+                        receipts_root: hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").into(),
+                        logs_bloom: hex!("00000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000").into(),
+                        prev_randao: hex!("b9b26dc14ea8c57d069fde0c94ad31c2558365c3986a0c06558470f8c02e62ce").into(),
+                        block_number: 246,
+                        gas_limit: 62908420,
+                        gas_used: 108540,
+                        timestamp: 1734718384,
+                        extra_data: hex!("d983010e08846765746888676f312e32322e358664617277696e").into(),
+                        base_fee_per_gas: U256::from(7u64),
+                        block_hash: hex!("878195e2ea83c74d475363d03d41a7fbfc4026d6e5bcffb713928253984a64a7").into(),
+                        transactions_root: hex!("909139b3137666b4551b629ce6d9fb7e5e6f6def8a48d078448ec6600fe63c7f").into(),
+                        withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(),
+                        blob_gas_used: 0,
+                        excess_blob_gas: 0,
+                    }),
+                    execution_branch: vec![
+                            hex!("5d78e26ea639df17c2194ff925f782b9522009d58cfc60e3d34ba79a19f8faf1").into(),
+                            hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(),
+                            hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(),
+                            hex!("3d84b2809a36450186e5169995a5e3cab55d751aee90fd8456b33d871ccaa463").into(),
+                    ],
+                }
+            },
+        },
+        finalized_header: BeaconHeader {
+            slot: 608,
+            proposer_index: 3,
+            parent_root: hex!("f10c2349530dbd339a72886270e2e304bb68155af68c918c850acd9ab341350f").into(),
+            state_root: hex!("6df0ef4cbb4986a84ff0763727402b88636e6b5535022cd3ad6967b8dd799402").into(),
+            body_root: hex!("f66fc1c022f07f91c777ad5c464625fc0b43d3e7a45650567dce60011210f574").into(),
+        },
+        block_roots_root: hex!("1c0dbf54db070770f5e573b72afe0aac2b0e3cf312107d1cd73bf64d7a2ed90c").into(),
+    }
+}
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mock.rs b/bridges/snowbridge/primitives/router/src/inbound/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..537853b324f633990b1c52c4eccef3a224755a30
--- /dev/null
+++ b/bridges/snowbridge/primitives/router/src/inbound/mock.rs
@@ -0,0 +1,48 @@
+use crate::inbound::{MessageToXcm, TokenId};
+use frame_support::parameter_types;
+use sp_runtime::{
+	traits::{IdentifyAccount, MaybeEquivalence, Verify},
+	MultiSignature,
+};
+use xcm::{latest::WESTEND_GENESIS_HASH, prelude::*};
+
+pub const CHAIN_ID: u64 = 11155111;
+pub const NETWORK: NetworkId = Ethereum { chain_id: CHAIN_ID };
+
+parameter_types! {
+	pub EthereumNetwork: NetworkId = NETWORK;
+
+	pub const CreateAssetCall: [u8;2] = [53, 0];
+	pub const CreateAssetExecutionFee: u128 = 2_000_000_000;
+	pub const CreateAssetDeposit: u128 = 100_000_000_000;
+	pub const SendTokenExecutionFee: u128 = 1_000_000_000;
+	pub const InboundQueuePalletInstance: u8 = 80;
+	pub UniversalLocation: InteriorLocation =
+		[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into();
+	pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]);
+}
+
+type Signature = MultiSignature;
+type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+type Balance = u128;
+
+pub(crate) struct MockTokenIdConvert;
+impl MaybeEquivalence<TokenId, Location> for MockTokenIdConvert {
+	fn convert(_id: &TokenId) -> Option<Location> {
+		Some(Location::parent())
+	}
+	fn convert_back(_loc: &Location) -> Option<TokenId> {
+		None
+	}
+}
+
+pub(crate) type MessageConverter = MessageToXcm<
+	CreateAssetCall,
+	CreateAssetDeposit,
+	InboundQueuePalletInstance,
+	AccountId,
+	Balance,
+	MockTokenIdConvert,
+	UniversalLocation,
+	AssetHubFromEthereum,
+>;
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index bc5d401cd4f73a3d1558c3698dc5f5ae786788ee..1c210afb1f7403d84dd8fc2f0acf35cb3063a171 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -2,6 +2,8 @@
 // SPDX-FileCopyrightText: 2023 Snowfork <hello@snowfork.com>
 //! Converts messages from Ethereum to XCM messages
 
+#[cfg(test)]
+mod mock;
 #[cfg(test)]
 mod tests;
 
@@ -394,10 +396,16 @@ where
 
 	// Convert ERC20 token address to a location that can be understood by Assets Hub.
 	fn convert_token_address(network: NetworkId, token: H160) -> Location {
-		Location::new(
-			2,
-			[GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }],
-		)
+		// If the token is `0x0000000000000000000000000000000000000000` then return the location of
+		// native Ether.
+		if token == H160([0; 20]) {
+			Location::new(2, [GlobalConsensus(network)])
+		} else {
+			Location::new(
+				2,
+				[GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }],
+			)
+		}
 	}
 
 	/// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign
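The new branch in `convert_token_address` above maps the all-zero token address to the bare Ethereum network location, while ERC-20 tokens keep their `AccountKey20` junction. A minimal sketch of the two resulting location shapes, reusing only types already used in this diff (the chain id and the `0xff…` contract address are illustrative):

```rust
use sp_core::H160;
use xcm::prelude::*;

const NETWORK: NetworkId = Ethereum { chain_id: 11155111 };

fn native_eth_vs_erc20_locations() {
	// Native Ether: the zero token address collapses to the network-level location.
	let eth = Location::new(2, [GlobalConsensus(NETWORK)]);

	// An ERC-20 token keeps the `AccountKey20` junction carrying its contract address.
	let token = H160([0xff; 20]);
	let erc20 = Location::new(
		2,
		[GlobalConsensus(NETWORK), AccountKey20 { network: None, key: token.into() }],
	);

	assert_ne!(eth, erc20);
}
```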
diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
index 786aa594f653eec5e160a0a15ed4df638da8e728..11d7928602c6ecb07b1cebe0086a8fa52643eb78 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
@@ -1,21 +1,12 @@
 use super::EthereumLocationsConverterFor;
-use crate::inbound::CallIndex;
-use frame_support::{assert_ok, parameter_types};
+use crate::inbound::{
+	mock::*, Command, ConvertMessage, Destination, MessageV1, VersionedMessage, H160,
+};
+use frame_support::assert_ok;
 use hex_literal::hex;
 use xcm::prelude::*;
 use xcm_executor::traits::ConvertLocation;
 
-const NETWORK: NetworkId = Ethereum { chain_id: 11155111 };
-
-parameter_types! {
-	pub EthereumNetwork: NetworkId = NETWORK;
-
-	pub const CreateAssetCall: CallIndex = [1, 1];
-	pub const CreateAssetExecutionFee: u128 = 123;
-	pub const CreateAssetDeposit: u128 = 891;
-	pub const SendTokenExecutionFee: u128 = 592;
-}
-
 #[test]
 fn test_ethereum_network_converts_successfully() {
 	let expected_account: [u8; 32] =
@@ -81,3 +72,74 @@ fn test_reanchor_all_assets() {
 		assert_eq!(reanchored_asset_with_ethereum_context, asset.clone());
 	}
 }
+
+#[test]
+fn test_convert_send_token_with_weth() {
+	const WETH: H160 = H160([0xff; 20]);
+	const AMOUNT: u128 = 1_000_000;
+	const FEE: u128 = 1_000;
+	const ACCOUNT_ID: [u8; 32] = [0xBA; 32];
+	const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 {
+		chain_id: CHAIN_ID,
+		command: Command::SendToken {
+			token: WETH,
+			destination: Destination::AccountId32 { id: ACCOUNT_ID },
+			amount: AMOUNT,
+			fee: FEE,
+		},
+	});
+	let result = MessageConverter::convert([1; 32].into(), MESSAGE);
+	assert_ok!(&result);
+	let (xcm, fee) = result.unwrap();
+	assert_eq!(FEE, fee);
+
+	let expected_assets = ReserveAssetDeposited(
+		vec![Asset {
+			id: AssetId(Location {
+				parents: 2,
+				interior: Junctions::X2(
+					[GlobalConsensus(NETWORK), AccountKey20 { network: None, key: WETH.into() }]
+						.into(),
+				),
+			}),
+			fun: Fungible(AMOUNT),
+		}]
+		.into(),
+	);
+	let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..)));
+	assert_eq!(actual_assets, Some(expected_assets))
+}
+
+#[test]
+fn test_convert_send_token_with_eth() {
+	const ETH: H160 = H160([0x00; 20]);
+	const AMOUNT: u128 = 1_000_000;
+	const FEE: u128 = 1_000;
+	const ACCOUNT_ID: [u8; 32] = [0xBA; 32];
+	const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 {
+		chain_id: CHAIN_ID,
+		command: Command::SendToken {
+			token: ETH,
+			destination: Destination::AccountId32 { id: ACCOUNT_ID },
+			amount: AMOUNT,
+			fee: FEE,
+		},
+	});
+	let result = MessageConverter::convert([1; 32].into(), MESSAGE);
+	assert_ok!(&result);
+	let (xcm, fee) = result.unwrap();
+	assert_eq!(FEE, fee);
+
+	let expected_assets = ReserveAssetDeposited(
+		vec![Asset {
+			id: AssetId(Location {
+				parents: 2,
+				interior: Junctions::X1([GlobalConsensus(NETWORK)].into()),
+			}),
+			fun: Fungible(AMOUNT),
+		}]
+		.into(),
+	);
+	let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..)));
+	assert_eq!(actual_assets, Some(expected_assets))
+}
diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
index 3b5dbdb77c89227d053548375b0c30b47792cea9..622c408070150392c00f80c30c58ad238cefbebd 100644
--- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
@@ -289,8 +289,13 @@ where
 		let (token, amount) = match reserve_asset {
 			Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
 				match inner_location.unpack() {
+					// Get the ERC20 contract address of the token.
 					(0, [AccountKey20 { network, key }]) if self.network_matches(network) =>
 						Some((H160(*key), *amount)),
+					// If there is no ERC20 contract address in the location then signal to the
+					// gateway that it is a native Ether transfer by using
+					// `0x0000000000000000000000000000000000000000` as the token address.
+					(0, []) => Some((H160([0; 20]), *amount)),
 					_ => None,
 				},
 			_ => None,
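On the outbound side, the arm added above treats an asset whose interior is empty (relative to the Ethereum context) as native Ether and encodes it with the all-zero token address. A hedged, stand-alone sketch of the same classification (the network-matching guard from the real code is omitted for brevity):

```rust
use sp_core::H160;
use xcm::prelude::*;

/// Illustrative only: mirrors the `unpack()` match in the converter above.
fn token_and_amount(inner_location: &Location, amount: u128) -> Option<(H160, u128)> {
	match inner_location.unpack() {
		// ERC-20: the contract address travels in the `AccountKey20` junction.
		(0, [AccountKey20 { network: _, key }]) => Some((H160(*key), amount)),
		// Empty interior: native Ether, signalled with the all-zero token address.
		(0, []) => Some((H160([0; 20]), amount)),
		_ => None,
	}
}
```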
diff --git a/bridges/snowbridge/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/primitives/router/src/outbound/tests.rs
index 44f81ce31b3a8f4761a68fd5ca2496a5d79320bf..2a60f9f3e0eab8569a5a1300b03d423243e7789a 100644
--- a/bridges/snowbridge/primitives/router/src/outbound/tests.rs
+++ b/bridges/snowbridge/primitives/router/src/outbound/tests.rs
@@ -515,6 +515,46 @@ fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() {
 	assert_eq!(result, Ok((expected_payload, [0; 32])));
 }
 
+#[test]
+fn xcm_converter_convert_with_native_eth_succeeds() {
+	let network = BridgedNetwork::get();
+
+	let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000");
+
+	// The asset is `{ parents: 0, interior: X1(Here) }` relative to ethereum.
+	let assets: Assets = vec![Asset { id: AssetId([].into()), fun: Fungible(1000) }].into();
+	let filter: AssetFilter = Wild(All);
+
+	let message: Xcm<()> = vec![
+		WithdrawAsset(assets.clone()),
+		ClearOrigin,
+		BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited },
+		DepositAsset {
+			assets: filter,
+			beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(),
+		},
+		SetTopic([0; 32]),
+	]
+	.into();
+
+	let mut converter =
+		XcmConverter::<MockTokenIdConvert, ()>::new(&message, network, Default::default());
+
+	// The token address that is expected to be sent should be
+	// `0x0000000000000000000000000000000000000000`. The Solidity gateway will
+	// interpret this as a transfer of ETH.
+	let expected_payload = Command::AgentExecute {
+		agent_id: Default::default(),
+		command: AgentExecuteCommand::TransferToken {
+			token: H160([0; 20]),
+			recipient: beneficiary_address.into(),
+			amount: 1000,
+		},
+	};
+	let result = converter.convert();
+	assert_eq!(result, Ok((expected_payload, [0; 32])));
+}
+
 #[test]
 fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() {
 	let network = BridgedNetwork::get();
diff --git a/cumulus/README.md b/cumulus/README.md
index 0c47df999022fa394e31c2753c5712aaf57dfb5b..400f9481c3fbeedd944416ff2a885a2a5867c416 100644
--- a/cumulus/README.md
+++ b/cumulus/README.md
@@ -4,7 +4,7 @@
 
 This repository contains both the Cumulus SDK and also specific chains implemented on top of this SDK.
 
-If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/contributor/container.md).
+If you only want to run a **Polkadot Parachain Node**, check out our [container section](../docs/contributor/container.md).
 
 ## Cumulus SDK
 
@@ -34,7 +34,7 @@ A Polkadot [collator](https://wiki.polkadot.network/docs/en/learn-collator) for
 `polkadot-parachain` binary (previously called `polkadot-collator`).
 
 You may run `polkadot-parachain` locally after building it or using one of the container option described
-[here](./docs/contributor/container.md).
+[here](../docs/contributor/container.md).
 
 ### Relay Chain Interaction
 To operate a parachain node, a connection to the corresponding relay chain is necessary. This can be achieved in one of
diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
index 68f2d37c8748863be879134d3fd0849adf5efb11..dbd9d5ba6a6f9c8dcb20e753b086eebe43e15f60 100644
--- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
+++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
@@ -97,6 +97,7 @@ where
 		// This is done for example when gap syncing and it is expected that the block after the gap
 		// was checked/chosen properly, e.g. by warp syncing to this block using a finality proof.
 		if block_params.state_action.skip_execution_checks() || block_params.with_state() {
+			block_params.fork_choice = Some(ForkChoiceStrategy::Custom(block_params.with_state()));
 			return Ok(block_params)
 		}
 
diff --git a/cumulus/docs/overview.md b/cumulus/docs/overview.md
index 402c56042c4911a3ecf49e0a7cbef5d15e2a0e9b..82603257a871b2acc67418d7aafcf89d92c11554 100644
--- a/cumulus/docs/overview.md
+++ b/cumulus/docs/overview.md
@@ -70,7 +70,7 @@ A Parachain validator needs to validate a given PoVBlock, but without requiring
 the Parachain. To still make it possible to validate the Parachain block, the PoVBlock contains the
 witness data. The witness data is a proof that is collected while building the block. The proof will
 contain all trie nodes that are read during the block production. Cumulus uses the witness data to
-reconstruct a partial trie and uses this a storage when executing the block.
+reconstruct a partial trie and uses this as storage when executing the block.
 
 The outgoing messages are also collected at block production. These are messages from the Parachain
 the block is built for to other Parachains or to the relay chain itself.
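The corrected sentence above is the heart of parachain validation: the witness data recorded during block production is enough to rebuild a partial trie that stands in for full storage when the block is re-executed. A hedged sketch of that step, assuming the `sp-trie`/`sp-state-machine` helpers named below (they are not part of this diff):

```rust
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::TrieBackendBuilder;
use sp_trie::StorageProof;

/// Illustrative only: turn witness data back into a storage backend for block execution.
fn backend_from_witness(proof: StorageProof, state_root: sp_core::H256) {
	// Every trie node read while building the block becomes part of a partial in-memory DB...
	let db = proof.into_memory_db::<BlakeTwo256>();
	// ...and a trie backend over that DB serves as the storage against which the block runs.
	let _backend = TrieBackendBuilder::new(db, state_root).build();
}
```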
diff --git a/cumulus/docs/release.md b/cumulus/docs/release.md
deleted file mode 100644
index 8302b7b9b7fc046920a92a97894ff16e8baca214..0000000000000000000000000000000000000000
--- a/cumulus/docs/release.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Releases
-
-## Versioning
-
-### Example #1
-
-```
-| Polkadot   | v  0. 9.22    |
-| Client     | v  0. 9.22 0  |
-| Runtime    | v     9 22 0  |  =>  9220
-| semver     |    0. 9.22 0  |
-```
-
-### Example #2
-
-```
-| Polkadot   | v  0.10.42    |
-| Client     | v  0.10.42 0  |
-| Runtime    | v    10.42 0  |  => 10420
-| semver     |    0.10.42 0  |
-```
-
-### Example #3
-
-```
-| Polkadot   | v  1. 2.18    |
-| Client     | v  1. 2.18 0  |
-| Runtime    | v  1  2 18 0  |  => 102180
-| semver     |    1. 2.18 0  |
-```
-
-
-This document contains information related to the releasing process and describes a few of the steps and checks that are
-performed during the release process.
-
-## Client
-
-### <a name="burnin"></a>Burn In
-
-Ensure that Parity DevOps has run the new release on Westend and Kusama Asset Hub collators for 12h prior to publishing
-the release.
-
-### Build Artifacts
-
-Add any necessary assets to the release. They should include:
-
-- Linux binaries
-    - GPG signature
-    - SHA256 checksum
-- WASM binaries of the runtimes
-- Source code
-
-
-## Runtimes
-
-### Spec Version
-
-A new runtime release must bump the `spec_version`. This may follow a pattern with the client release (e.g. runtime
-v9220 corresponds to v0.9.22).
-
-### Runtime version bump between RCs
-
-The clients need to be aware of runtime changes. However, we do not want to bump the `spec_version` for every single
-release candidate. Instead, we can bump the `impl` field of the version to signal the change to the client. This applies
-only to runtimes that have been deployed.
-
-### Old Migrations Removed
-
-Previous `on_runtime_upgrade` functions from old upgrades should be removed.
-
-### New Migrations
-
-Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade`
-function of the appropriate pallets.
-
-### Extrinsic Ordering & Storage
-
-Offline signing libraries depend on a consistent ordering of call indices and functions. Compare the metadata of the
-current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. It also
-checks if there have been any changes in `storage`. In case of a breaking change, increase `transaction_version`.
-
-To verify the order has not changed, manually start the following
-[Github Action](https://github.com/paritytech/polkadot-sdk/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml).
-It takes around a minute to run and will produce the report as artifact you need to manually check.
-
-To run it, in the _Run Workflow_ dropdown:
-1. **Use workflow from**: to ignore, leave `master` as default
-2. **The WebSocket url of the reference node**: - Asset Hub Polkadot: `wss://statemint-rpc.polkadot.io`
-    - Asset Hub Kusama: `wss://statemine-rpc.polkadot.io`
-    - Asset Hub Westend: `wss://westmint-rpc.polkadot.io`
-3. **A url to a Linux binary for the node containing the runtime to test**: Paste the URL of the latest
-   release-candidate binary from the draft-release on Github. The binary has to previously be uploaded to S3 (Github url
-   link to the binary is constantly changing)
-    - E.g: https://releases.parity.io/cumulus/v0.9.270-rc3/polkadot-parachain
-4. **The name of the chain under test. Usually, you would pass a local chain**: - Asset Hub Polkadot:
-	`asset-hub-polkadot-local`
-    - Asset Hub Kusama: `asset-hub-kusama-local`
-    - Asset Hub Westend: `asset-hub-westend-local`
-5. Click **Run workflow**
-
-When the workflow is done, click on it and download the zip artifact, inside you'll find an `output.txt` file. The
-things to look for in the output are lines like:
-
-- `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for Identity has changed
-- `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets.
-- If no indices have changed, every modules line should look something like `[Identity] idx 25 (calls 15)`
-
-**Note**: Adding new functions to the runtime does not constitute a breaking change as long as the indexes did not
-change.
-
-**Note**: Extrinsic function signatures changes (adding/removing & ordering arguments) are not caught by the job, so
-those changes should be reviewed "manually"
-
-### Benchmarks
-
-The Benchmarks can now be started from the CI. First find the CI pipeline from
-[here](https://gitlab.parity.io/parity/mirrors/cumulus/-/pipelines?page=1&scope=all&ref=release-parachains-v9220) and
-pick the latest. [Guide](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus)
-
-### Integration Tests
-
-Until https://github.com/paritytech/ci_cd/issues/499 is done, tests will have to be run manually.
-1. Go to https://github.com/paritytech/parachains-integration-tests and check out the release branch. E.g.
-https://github.com/paritytech/parachains-integration-tests/tree/release-v9270-v0.9.27 for `release-parachains-v0.9.270`
-2. Clone `release-parachains-<version>` branch from Cumulus
-3. `cargo build --release`
-4. Copy `./target/polkadot-parachain` to `./bin`
-5. Clone `it/release-<version>-fast-sudo` from Polkadot In case the branch does not exists (it is a manual process):
-	cherry pick `paritytech/polkadot@791c8b8` and run:
-	`find . -type f -name "*.toml" -print0 | xargs -0 sed -i '' -e 's/polkadot-vX.X.X/polkadot-v<version>/g'`
-6. `cargo build --release --features fast-runtime`
-7. Copy `./target/polkadot` into `./bin` (in Cumulus)
-8. Run the tests:
-   - Asset Hub Polkadot: `yarn zombienet-test -c ./examples/statemint/config.toml -t ./examples/statemint`
-   - Asset Hub Kusama: `yarn zombienet-test -c ./examples/statemine/config.toml -t ./examples/statemine`
diff --git a/cumulus/pallets/weight-reclaim/Cargo.toml b/cumulus/pallets/weight-reclaim/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..8bde6abaff6a1a9e53f0e7048d94b18cc43c5325
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/Cargo.toml
@@ -0,0 +1,63 @@
+[package]
+name = "cumulus-pallet-weight-reclaim"
+version = "1.0.0"
+authors.workspace = true
+edition.workspace = true
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+description = "pallet and transaction extensions for accurate proof size reclaim"
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+# Substrate dependencies
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-trie = { workspace = true }
+
+cumulus-primitives-storage-weight-reclaim = { workspace = true }
+frame-benchmarking = { optional = true, workspace = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+# Other dependencies
+codec = { features = ["derive"], workspace = true }
+derivative = { features = ["use_core"], workspace = true }
+docify = { workspace = true }
+log = { workspace = true, default-features = true }
+scale-info = { features = ["derive"], workspace = true }
+
+[dev-dependencies]
+cumulus-primitives-proof-size-hostfunction = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"cumulus-primitives-proof-size-hostfunction/std",
+	"cumulus-primitives-storage-weight-reclaim/std",
+	"frame-benchmarking?/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-trie/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/cumulus/pallets/weight-reclaim/src/benchmarks.rs b/cumulus/pallets/weight-reclaim/src/benchmarks.rs
new file mode 100644
index 0000000000000000000000000000000000000000..78bebc967d96bd91ec3f9edd878caf7314523861
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/benchmarks.rs
@@ -0,0 +1,71 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use super::*;
+use frame_support::pallet_prelude::{DispatchClass, Pays};
+use frame_system::RawOrigin;
+use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction};
+
+#[frame_benchmarking::v2::benchmarks(
+	where T: Send + Sync,
+		<T as frame_system::Config>::RuntimeCall:
+			Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+		<T as frame_system::Config>::RuntimeOrigin: AsTransactionAuthorizedOrigin,
+)]
+mod bench {
+	use super::*;
+	use frame_benchmarking::impl_test_function;
+
+	#[benchmark]
+	fn storage_weight_reclaim() {
+		let ext = StorageWeightReclaim::<T, ()>::new(());
+
+		let origin = RawOrigin::Root.into();
+		let call = T::RuntimeCall::from(frame_system::Call::remark { remark: alloc::vec![] });
+
+		let overestimate = 10_000;
+		let info = DispatchInfo {
+			call_weight: Weight::zero().add_proof_size(overestimate),
+			extension_weight: Weight::zero(),
+			class: DispatchClass::Normal,
+			pays_fee: Pays::No,
+		};
+
+		let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No };
+
+		let mut block_weight = frame_system::ConsumedWeight::default();
+		block_weight.accrue(Weight::from_parts(0, overestimate), info.class);
+
+		frame_system::BlockWeight::<T>::put(block_weight);
+
+		#[block]
+		{
+			assert!(ext.test_run(origin, &call, &info, 0, 0, |_| Ok(post_info)).unwrap().is_ok());
+		}
+
+		let final_block_proof_size =
+			frame_system::BlockWeight::<T>::get().get(info.class).proof_size();
+
+		assert!(
+			final_block_proof_size < overestimate,
+			"The proof size measured should be less than {overestimate}"
+		);
+	}
+
+	impl_benchmark_test_suite!(Pallet, crate::tests::setup_test_ext_default(), crate::tests::Test);
+}
diff --git a/cumulus/pallets/weight-reclaim/src/lib.rs b/cumulus/pallets/weight-reclaim/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bd9929033af14e43614ea1cd70bdd1b3232a6cfe
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/lib.rs
@@ -0,0 +1,311 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Pallet and transaction extensions to reclaim PoV proof size weight after an extrinsic has been
+//! applied.
+//!
+//! This crate provides:
+//! * [`StorageWeightReclaim`] transaction extension: it must wrap the whole transaction extension
+//!   pipeline.
+//! * The pallet that provides the transaction extension's weight information and benchmarks.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+#[cfg(not(feature = "std"))]
+use alloc::vec::Vec;
+use codec::{Decode, Encode};
+use cumulus_primitives_storage_weight_reclaim::get_proof_size;
+use derivative::Derivative;
+use frame_support::{
+	dispatch::{DispatchInfo, PostDispatchInfo},
+	pallet_prelude::Weight,
+	traits::Defensive,
+};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension},
+	transaction_validity::{TransactionSource, TransactionValidityError, ValidTransaction},
+	DispatchResult,
+};
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarks;
+#[cfg(test)]
+mod tests;
+mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+const LOG_TARGET: &'static str = "runtime::storage_reclaim_pallet";
+
+/// Pallet to use alongside the transaction extension [`StorageWeightReclaim`]; it provides
+/// weight information and benchmarks.
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type WeightInfo: WeightInfo;
+	}
+}
+
+/// Storage weight reclaim mechanism.
+///
+/// This extension must wrap all the transaction extensions:
+#[doc = docify::embed!("./src/tests.rs", Tx)]
+///
+/// This extension checks the size of the node-side storage proof before and after executing a given
+/// extrinsic using the proof size host function. The difference between benchmarked and used weight
+/// is reclaimed.
+///
+/// If the benchmark underestimated the proof size, the difference is added to the block weight.
+///
+/// For the time part of the weight, it does the same as the system `WeightReclaim` extension: it
+/// calculates the unused weight using the post-dispatch information and reclaims it.
+/// This extension can therefore be used as a drop-in replacement for the `WeightReclaim`
+/// extension on parachains.
+#[derive(Encode, Decode, TypeInfo, Derivative)]
+#[derivative(
+	Clone(bound = "S: Clone"),
+	Eq(bound = "S: Eq"),
+	PartialEq(bound = "S: PartialEq"),
+	Default(bound = "S: Default")
+)]
+#[scale_info(skip_type_params(T))]
+pub struct StorageWeightReclaim<T, S>(pub S, core::marker::PhantomData<T>);
+
+impl<T, S> StorageWeightReclaim<T, S> {
+	/// Create a new `StorageWeightReclaim` instance.
+	pub fn new(s: S) -> Self {
+		Self(s, Default::default())
+	}
+}
+
+impl<T, S> From<S> for StorageWeightReclaim<T, S> {
+	fn from(s: S) -> Self {
+		Self::new(s)
+	}
+}
+
+impl<T, S: core::fmt::Debug> core::fmt::Debug for StorageWeightReclaim<T, S> {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+		#[cfg(feature = "std")]
+		let _ = write!(f, "StorageWeightReclaim<{:?}>", self.0);
+
+		#[cfg(not(feature = "std"))]
+		let _ = write!(f, "StorageWeightReclaim<wasm-stripped>");
+
+		Ok(())
+	}
+}
+
+impl<T: Config + Send + Sync, S: TransactionExtension<T::RuntimeCall>>
+	TransactionExtension<T::RuntimeCall> for StorageWeightReclaim<T, S>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	const IDENTIFIER: &'static str = "StorageWeightReclaim<Use `metadata()`!>";
+
+	type Implicit = S::Implicit;
+
+	// Initial proof size and inner extension value.
+	type Val = (Option<u64>, S::Val);
+
+	// Initial proof size and inner extension pre.
+	type Pre = (Option<u64>, S::Pre);
+
+	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
+		self.0.implicit()
+	}
+
+	fn metadata() -> Vec<sp_runtime::traits::TransactionExtensionMetadata> {
+		let mut inner = S::metadata();
+		inner.push(sp_runtime::traits::TransactionExtensionMetadata {
+			identifier: "StorageWeightReclaim",
+			ty: scale_info::meta_type::<()>(),
+			implicit: scale_info::meta_type::<()>(),
+		});
+		inner
+	}
+
+	fn weight(&self, call: &T::RuntimeCall) -> Weight {
+		T::WeightInfo::storage_weight_reclaim().saturating_add(self.0.weight(call))
+	}
+
+	fn validate(
+		&self,
+		origin: T::RuntimeOrigin,
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		self_implicit: Self::Implicit,
+		inherited_implication: &impl Implication,
+		source: TransactionSource,
+	) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> {
+		let proof_size = get_proof_size();
+
+		self.0
+			.validate(origin, call, info, len, self_implicit, inherited_implication, source)
+			.map(|(validity, val, origin)| (validity, (proof_size, val), origin))
+	}
+
+	fn prepare(
+		self,
+		val: Self::Val,
+		origin: &T::RuntimeOrigin,
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		let (proof_size, inner_val) = val;
+		self.0.prepare(inner_val, origin, call, info, len).map(|pre| (proof_size, pre))
+	}
+
+	fn post_dispatch_details(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		let (proof_size_before_dispatch, inner_pre) = pre;
+
+		let mut post_info_with_inner = *post_info;
+		S::post_dispatch(inner_pre, info, &mut post_info_with_inner, len, result)?;
+
+		let inner_refund = if let (Some(before_weight), Some(after_weight)) =
+			(post_info.actual_weight, post_info_with_inner.actual_weight)
+		{
+			before_weight.saturating_sub(after_weight)
+		} else {
+			Weight::zero()
+		};
+
+		let Some(proof_size_before_dispatch) = proof_size_before_dispatch else {
+			// We have no proof size information, there is nothing we can do.
+			return Ok(inner_refund);
+		};
+
+		let Some(proof_size_after_dispatch) = get_proof_size().defensive_proof(
+			"Proof recording enabled during prepare, now disabled. This should not happen.",
+		) else {
+			return Ok(inner_refund)
+		};
+
+		// The consumed proof size as measured by the host.
+		let measured_proof_size =
+			proof_size_after_dispatch.saturating_sub(proof_size_before_dispatch);
+
+		// The consumed weight as benchmarked. Calculated from post info and info.
+		// NOTE: `calc_actual_weight` will take the minimum of `post_info` and `info` weights.
+		// This means any underestimation of compute time in the pre dispatch info will not be
+		// taken into account.
+		let benchmarked_actual_weight = post_info_with_inner.calc_actual_weight(info);
+
+		let benchmarked_actual_proof_size = benchmarked_actual_weight.proof_size();
+		if benchmarked_actual_proof_size < measured_proof_size {
+			log::error!(
+				target: LOG_TARGET,
+				"Benchmarked storage weight smaller than consumed storage weight. \
+				benchmarked: {benchmarked_actual_proof_size} consumed: {measured_proof_size}"
+			);
+		} else {
+			log::trace!(
+				target: LOG_TARGET,
+				"Reclaiming storage weight. benchmarked: {benchmarked_actual_proof_size},
+				consumed: {measured_proof_size}"
+			);
+		}
+
+		let accurate_weight = benchmarked_actual_weight.set_proof_size(measured_proof_size);
+
+		let pov_size_missing_from_node = frame_system::BlockWeight::<T>::mutate(|current_weight| {
+			let already_reclaimed = frame_system::ExtrinsicWeightReclaimed::<T>::get();
+			current_weight.accrue(already_reclaimed, info.class);
+			current_weight.reduce(info.total_weight(), info.class);
+			current_weight.accrue(accurate_weight, info.class);
+
+			// If we encounter a situation where the node-side proof size is already higher than
+			// what we have in the runtime bookkeeping, we add the difference to the `BlockWeight`.
+			// This prevents the node-side proof size from growing faster than the proof size
+			// tracked by the runtime.
+			let extrinsic_len = frame_system::AllExtrinsicsLen::<T>::get().unwrap_or(0);
+			let node_side_pov_size = proof_size_after_dispatch.saturating_add(extrinsic_len.into());
+			let block_weight_proof_size = current_weight.total().proof_size();
+			let pov_size_missing_from_node =
+				node_side_pov_size.saturating_sub(block_weight_proof_size);
+			if pov_size_missing_from_node > 0 {
+				log::warn!(
+					target: LOG_TARGET,
+					"Node-side PoV size higher than runtime proof size weight. node-side: \
+					{node_side_pov_size} extrinsic_len: {extrinsic_len} runtime: \
+					{block_weight_proof_size}, missing: {pov_size_missing_from_node}. Setting to \
+					node-side proof size."
+				);
+				current_weight
+					.accrue(Weight::from_parts(0, pov_size_missing_from_node), info.class);
+			}
+
+			pov_size_missing_from_node
+		});
+
+		// The saturation will happen if the pre-dispatch weight is underestimating the proof
+		// size or if the node-side proof size is higher than expected.
+		// In this case the extrinsic proof size weight reclaimed is 0 and not a negative reclaim.
+		let accurate_unspent = info
+			.total_weight()
+			.saturating_sub(accurate_weight)
+			.saturating_sub(Weight::from_parts(0, pov_size_missing_from_node));
+		frame_system::ExtrinsicWeightReclaimed::<T>::put(accurate_unspent);
+
+		// Calls have already returned their unspent amount.
+		// (As would any transaction extension earlier in the pipeline, but there shouldn't be any.)
+		let already_unspent_in_tx_ext_pipeline = post_info.calc_unspent(info);
+		Ok(accurate_unspent.saturating_sub(already_unspent_in_tx_ext_pipeline))
+	}
+
+	fn bare_validate(
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> frame_support::pallet_prelude::TransactionValidity {
+		S::bare_validate(call, info, len)
+	}
+
+	fn bare_validate_and_prepare(
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> Result<(), TransactionValidityError> {
+		S::bare_validate_and_prepare(call, info, len)
+	}
+
+	fn bare_post_dispatch(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		S::bare_post_dispatch(info, post_info, len, result)?;
+
+		frame_system::Pallet::<T>::reclaim_weight(info, post_info)
+	}
+}
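To make the intended usage of the new crate concrete, here is a hedged sketch of a parachain runtime's transaction extension pipeline wrapped by `StorageWeightReclaim`. `Runtime` and the inner extension list are placeholders; the authoritative example is the `Tx` type embedded from `tests.rs` above.

```rust
// Sketch only: a typical parachain extension pipeline, with every other extension nested
// inside `StorageWeightReclaim` so that both ref-time and proof-size weight are reclaimed.
type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
	Runtime,
	(
		frame_system::CheckNonZeroSender<Runtime>,
		frame_system::CheckSpecVersion<Runtime>,
		frame_system::CheckTxVersion<Runtime>,
		frame_system::CheckGenesis<Runtime>,
		frame_system::CheckMortality<Runtime>,
		frame_system::CheckNonce<Runtime>,
		frame_system::CheckWeight<Runtime>,
		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
		// No separate `frame_system::WeightReclaim`: the wrapper already handles the reclaim.
	),
>;
```

Wrapping (rather than appending) matters: the extension snapshots the proof size before any inner extension runs and again after dispatch, so everything inside the tuple is covered by the reclaim.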
diff --git a/cumulus/pallets/weight-reclaim/src/tests.rs b/cumulus/pallets/weight-reclaim/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b87c107c7ec71ce8dea8b04b702f673373dfd16f
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/tests.rs
@@ -0,0 +1,1050 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg(test)]
+
+use super::*;
+use cumulus_primitives_proof_size_hostfunction::PROOF_RECORDING_DISABLED;
+use frame_support::{
+	assert_ok, derive_impl, dispatch::GetDispatchInfo, pallet_prelude::DispatchClass,
+};
+use sp_runtime::{
+	generic,
+	traits::{Applyable, BlakeTwo256, DispatchTransaction, Get},
+	BuildStorage,
+};
+use sp_trie::proof_size_extension::ProofSizeExt;
+
+thread_local! {
+	static CHECK_WEIGHT_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static STORAGE_WEIGHT_RECLAIM_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static MOCK_EXT_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static MOCK_EXT_REFUND: core::cell::RefCell<Weight> = Default::default();
+}
+
+/// An extension which has some proof_size weight and some proof_size refund.
+#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, scale_info::TypeInfo)]
+pub struct MockExtensionWithRefund;
+
+impl TransactionExtension<RuntimeCall> for MockExtensionWithRefund {
+	const IDENTIFIER: &'static str = "mock_extension_with_refund";
+	type Implicit = ();
+	type Val = ();
+	type Pre = ();
+	fn weight(&self, _: &RuntimeCall) -> Weight {
+		MOCK_EXT_WEIGHT.with_borrow(|v| *v)
+	}
+	fn post_dispatch_details(
+		_pre: Self::Pre,
+		_info: &DispatchInfoOf<RuntimeCall>,
+		_post_info: &PostDispatchInfoOf<RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		Ok(MOCK_EXT_REFUND.with_borrow(|v| *v))
+	}
+	fn bare_post_dispatch(
+		_info: &DispatchInfoOf<RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		if let Some(ref mut w) = post_info.actual_weight {
+			*w -= MOCK_EXT_REFUND.with_borrow(|v| *v);
+		}
+		Ok(())
+	}
+
+	sp_runtime::impl_tx_ext_default!(RuntimeCall; validate prepare);
+}
+
+pub type Tx =
+	crate::StorageWeightReclaim<Test, (frame_system::CheckWeight<Test>, MockExtensionWithRefund)>;
+type AccountId = u64;
+type Extrinsic = generic::UncheckedExtrinsic<AccountId, RuntimeCall, (), Tx>;
+type Block = generic::Block<generic::Header<AccountId, BlakeTwo256>, Extrinsic>;
+
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(
+		RuntimeCall,
+		RuntimeEvent,
+		RuntimeError,
+		RuntimeOrigin,
+		RuntimeFreezeReason,
+		RuntimeHoldReason,
+		RuntimeSlashReason,
+		RuntimeLockId,
+		RuntimeTask
+	)]
+	pub struct Test;
+
+	#[runtime::pallet_index(0)]
+	pub type System = frame_system::Pallet<Test>;
+
+	#[runtime::pallet_index(1)]
+	pub type WeightReclaim = crate::Pallet<Test>;
+}
+
+pub struct MockWeightInfo;
+
+impl frame_system::ExtensionsWeightInfo for MockWeightInfo {
+	fn check_genesis() -> Weight {
+		Default::default()
+	}
+	fn check_mortality_mortal_transaction() -> Weight {
+		Default::default()
+	}
+	fn check_mortality_immortal_transaction() -> Weight {
+		Default::default()
+	}
+	fn check_non_zero_sender() -> Weight {
+		Default::default()
+	}
+	fn check_nonce() -> Weight {
+		Default::default()
+	}
+	fn check_spec_version() -> Weight {
+		Default::default()
+	}
+	fn check_tx_version() -> Weight {
+		Default::default()
+	}
+	fn check_weight() -> Weight {
+		CHECK_WEIGHT_WEIGHT.with_borrow(|v| *v)
+	}
+	fn weight_reclaim() -> Weight {
+		Default::default()
+	}
+}
+
+impl frame_system::WeightInfo for MockWeightInfo {
+	fn remark(_b: u32) -> Weight {
+		Weight::from_parts(400, 0)
+	}
+	fn set_code() -> Weight {
+		Weight::zero()
+	}
+	fn set_storage(_i: u32) -> Weight {
+		Weight::zero()
+	}
+	fn kill_prefix(_p: u32) -> Weight {
+		Weight::zero()
+	}
+	fn kill_storage(_i: u32) -> Weight {
+		Weight::zero()
+	}
+	fn set_heap_pages() -> Weight {
+		Weight::zero()
+	}
+	fn remark_with_event(_b: u32) -> Weight {
+		Weight::zero()
+	}
+	fn authorize_upgrade() -> Weight {
+		Weight::zero()
+	}
+	fn apply_authorized_upgrade() -> Weight {
+		Weight::zero()
+	}
+}
+
+impl crate::WeightInfo for MockWeightInfo {
+	fn storage_weight_reclaim() -> Weight {
+		STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow(|v| *v)
+	}
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = Block;
+	type AccountData = ();
+	type MaxConsumers = frame_support::traits::ConstU32<3>;
+	type ExtensionsWeightInfo = MockWeightInfo;
+}
+
+impl crate::Config for Test {
+	type WeightInfo = MockWeightInfo;
+}
+
+fn new_test_ext() -> sp_io::TestExternalities {
+	RuntimeGenesisConfig::default().build_storage().unwrap().into()
+}
+
+struct TestRecorder {
+	return_values: Box<[usize]>,
+	counter: core::sync::atomic::AtomicUsize,
+}
+
+impl TestRecorder {
+	fn new(values: &[usize]) -> Self {
+		TestRecorder { return_values: values.into(), counter: Default::default() }
+	}
+}
+
+impl sp_trie::ProofSizeProvider for TestRecorder {
+	fn estimate_encoded_size(&self) -> usize {
+		let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
+		self.return_values[counter]
+	}
+}
+
+fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities {
+	let mut test_ext = new_test_ext();
+	let test_recorder = TestRecorder::new(proof_values);
+	test_ext.register_extension(ProofSizeExt::new(test_recorder));
+	test_ext
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+pub fn setup_test_ext_default() -> sp_io::TestExternalities {
+	setup_test_externalities(&[0; 32])
+}
+
+fn set_current_storage_weight(new_weight: u64) {
+	frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+		current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal);
+	});
+}
+
+fn get_storage_weight() -> Weight {
+	*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal)
+}
+
+const CALL: &<Test as frame_system::Config>::RuntimeCall =
+	&RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 });
+const ALICE_ORIGIN: frame_system::Origin<Test> = frame_system::Origin::<Test>::Signed(99);
+const LEN: usize = 150;
+
+fn new_tx_ext() -> Tx {
+	Tx::new((frame_system::CheckWeight::new(), MockExtensionWithRefund))
+}
+
+fn new_extrinsic() -> generic::CheckedExtrinsic<AccountId, RuntimeCall, Tx> {
+	generic::CheckedExtrinsic {
+		format: generic::ExtrinsicFormat::Signed(99, new_tx_ext()),
+		function: RuntimeCall::System(frame_system::Call::remark { remark: vec![] }),
+	}
+}
+
+#[allow(unused)]
+mod doc {
+	type Runtime = super::Test;
+	use crate::StorageWeightReclaim;
+
+	#[docify::export(Tx)]
+	type Tx = StorageWeightReclaim<
+		Runtime,
+		(
+			frame_system::CheckNonce<Runtime>,
+			frame_system::CheckWeight<Runtime>,
+			// ... all other extensions
+			// No need for `frame_system::WeightReclaim`: this wrapper already handles the reclaim.
+		),
+	>;
+}
+
+#[test]
+fn basic_refund_no_post_info() {
+	// The real cost will be 100 bytes of storage size
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight, None);
+		assert_eq!(get_storage_weight().proof_size(), 1250);
+	});
+}
+
+#[test]
+fn basic_refund_some_post_info() {
+	// The real cost will be 100 bytes of storage size
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 100));
+		assert_eq!(get_storage_weight().proof_size(), 1250);
+	});
+}
+
+#[test]
+fn does_nothing_without_extension() {
+	// Proof size extension not registered
+	let mut test_ext = new_test_ext();
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, None);
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), info.total_weight());
+		assert_eq!(get_storage_weight().proof_size(), 1650);
+	})
+}
+
+#[test]
+fn negative_refund_is_added_to_weight() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+		// Benchmarked storage weight: 100
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Weight added should be 100 + 150 (len)
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(100));
+
+		// We expect no refund
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), info.total_weight());
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			1100 + LEN as u64 + info.total_weight().proof_size()
+		);
+	})
+}
+
+#[test]
+fn test_zero_proof_size() {
+	let mut test_ext = setup_test_externalities(&[0, 0]);
+
+	test_ext.execute_with(|| {
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 0));
+		// Proof size should be exactly equal to extrinsic length
+		assert_eq!(get_storage_weight().proof_size(), LEN as u64);
+	});
+}
+
+#[test]
+fn test_larger_pre_dispatch_proof_size() {
+	let mut test_ext = setup_test_externalities(&[300, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1300);
+
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Adds 500 + 150 (len) weight, total weight is 1950
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(300));
+
+		// check weight:
+		// Refund 500 unspent weight according to `post_info`, total weight is now 1650
+		//
+		// storage reclaim:
+		// Recorded proof size is negative -200, total weight is now 1450
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 0));
+		assert_eq!(get_storage_weight().proof_size(), 1450);
+	});
+}
+
+#[test]
+fn test_incorporates_check_weight_unspent_weight() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 300
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() };
+
+		// Actual weight is 50
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 250)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 300 + 150 (len) of weight
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(100));
+
+		// The `CheckWeight` extension refunds unspent weight according to `actual_weight` in
+		// `PostDispatchInfo`, so we always call `post_dispatch` to verify that both extensions
+		// interoperate correctly.
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 350 - LEN as u64));
+		// Reclaimed 100
+		assert_eq!(get_storage_weight().proof_size(), 1350);
+	})
+}
+
+#[test]
+fn test_incorporates_check_weight_unspent_weight_on_negative() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+		// Benchmarked storage weight: 50
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() };
+
+		// Actual weight: 50 ref time, 25 proof size
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 25)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Adds 50 + 150 (len) weight, total weight 1200
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+		assert_eq!(pre.0, Some(100));
+
+		// The `CheckWeight` extension refunds the 25 unspent proof size according to `post_info`,
+		// total weight is now 1175
+		//
+		// storage reclaim:
+		// Adds 200 - 25 (unspent) == 175 weight, total weight 1350
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 25));
+		assert_eq!(get_storage_weight().proof_size(), 1350);
+	})
+}
+
+#[test]
+fn test_nothing_reclaimed() {
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(0);
+		// Benchmarked storage weight: 100
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 100), ..Default::default() };
+
+		// Actual proof size is 100
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 100)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Adds benchmarked weight 100 + 150 (len), total weight is now 250
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		// Weight should go up by 150 len + 100 proof size weight, total weight 250
+		assert_eq!(get_storage_weight().proof_size(), 250);
+
+		// Should return the pre-dispatch `setup_test_externalities` proof recorder value: 0.
+		assert_eq!(pre.0, Some(0));
+
+		// The `CheckWeight` extension refunds unspent weight according to `actual_weight` in
+		// `PostDispatchInfo`, so we always call `post_dispatch` to verify that both extensions
+		// interoperate correctly.
+		// Nothing to refund, unspent is 0, total weight 250
+		//
+		// weight reclaim:
+		// `setup_test_externalities` post-dispatch proof recorder value: 100, so the extrinsic
+		// actually used 100 proof size.
+		// Nothing to refund or add, weight matches proof recorder
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 100));
+		// Check block len weight was not reclaimed:
+		// 100 weight + 150 extrinsic len == 250 proof size
+		assert_eq!(get_storage_weight().proof_size(), 250);
+	})
+}
+
+// Test for refund of calls and related proof size
+#[test]
+fn test_series() {
+	struct TestCfg {
+		measured_proof_size_pre_dispatch: u64,
+		measured_proof_size_post_dispatch: u64,
+		info_call_weight: Weight,
+		info_extension_weight: Weight,
+		post_info_actual_weight: Option<Weight>,
+		block_weight_pre_dispatch: Weight,
+		mock_ext_refund: Weight,
+		assert_post_info_weight: Option<Weight>,
+		assert_block_weight_post_dispatch: Weight,
+	}
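+	// Each `TestCfg` drives one run of the test: the proof recorder returns the two measured
+	// values, the dispatch info and post info carry the benchmarked weights, and the assertions
+	// check the resulting `PostDispatchInfo` weight and the block weight after `post_dispatch`.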
+
+	let base_extrinsic = <<Test as frame_system::Config>::BlockWeights as Get<
+		frame_system::limits::BlockWeights,
+	>>::get()
+	.per_class
+	.get(DispatchClass::Normal)
+	.base_extrinsic;
+
+	let tests = vec![
+		// Info is exact, no post info, no refund.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: None,
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(0, 0),
+			assert_post_info_weight: None,
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1100, 1300 + LEN as u64),
+		},
+		// some tx ext refund is ignored, because post info is None.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: None,
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: None,
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1100, 1300 + LEN as u64),
+		},
+		// Some tx ext refund is ignored for proof size because it would go below the actual
+		// measured usage.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(100, 300)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: Some(Weight::from_parts(80, 300)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1080, 1300 + LEN as u64),
+		},
+		// Post info doesn't double refund the call and is missing some of the actual proof usage.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 350,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			// 50 proof size is missed in the pov because post info reports 100 as unspent when it
+			// should be only 50.
+			assert_post_info_weight: Some(Weight::from_parts(40, 200)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1250 + LEN as u64),
+		},
+		// post info doesn't double refund the call and is accurate.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 250,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: Some(Weight::from_parts(40, 150)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1150 + LEN as u64),
+		},
+		// post info doesn't double refund the call and is accurate. Even if mock ext is refunding
+		// too much.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 250,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 300),
+			assert_post_info_weight: Some(Weight::from_parts(40, 150)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1150 + LEN as u64),
+		},
+	];
+
+	for (i, test) in tests.into_iter().enumerate() {
+		dbg!("test number: ", i);
+		MOCK_EXT_REFUND.with_borrow_mut(|v| *v = test.mock_ext_refund);
+		let mut test_ext = setup_test_externalities(&[
+			test.measured_proof_size_pre_dispatch as usize,
+			test.measured_proof_size_post_dispatch as usize,
+		]);
+
+		test_ext.execute_with(|| {
+			frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(test.block_weight_pre_dispatch, DispatchClass::Normal);
+			});
+			// Build the dispatch info from the test configuration.
+			let info = DispatchInfo {
+				call_weight: test.info_call_weight,
+				extension_weight: test.info_extension_weight,
+				..Default::default()
+			};
+			let mut post_info = PostDispatchInfo {
+				actual_weight: test.post_info_actual_weight,
+				pays_fee: Default::default(),
+			};
+			let tx_ext = new_tx_ext();
+			let (pre, _) = tx_ext
+				.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+				.unwrap();
+			assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+			assert_eq!(post_info.actual_weight, test.assert_post_info_weight);
+			assert_eq!(
+				*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				test.assert_block_weight_post_dispatch,
+			);
+		})
+	}
+}
+
+#[test]
+fn storage_size_reported_correctly() {
+	let mut test_ext = setup_test_externalities(&[1000]);
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), Some(1000));
+	});
+
+	let mut test_ext = new_test_ext();
+
+	let test_recorder = TestRecorder::new(&[0]);
+
+	test_ext.register_extension(ProofSizeExt::new(test_recorder));
+
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), Some(0));
+	});
+}
+
+#[test]
+fn storage_size_disabled_reported_correctly() {
+	let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]);
+
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), None);
+	});
+}
+
+#[test]
+fn full_basic_refund() {
+	// Settings for the test:
+	let actual_used_proof_size = 200;
+	let check_weight = 100;
+	let storage_weight_reclaim = 100;
+	let mock_ext = 142;
+	let mock_ext_refund = 100;
+
+	// Test execution:
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(1, check_weight));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT
+		.with_borrow_mut(|v| *v = Weight::from_parts(1, storage_weight_reclaim));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(36, mock_ext));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(35, mock_ext_refund));
+
+	let initial_storage_weight = 1212u64;
+
+	let mut test_ext = setup_test_externalities(&[
+		initial_storage_weight as usize,
+		initial_storage_weight as usize + actual_used_proof_size,
+	]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(initial_storage_weight);
+
+		let extrinsic = new_extrinsic();
+		let call_info = extrinsic.function.get_dispatch_info();
+
+		let info = extrinsic.get_dispatch_info();
+		let post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+		// Assertions:
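+		// Expected extra ref time on top of the call weight:
+		// 1 (CheckWeight) + 1 (StorageWeightReclaim) + 36 (mock ext) - 35 (mock ext refund) = 3.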
+		assert_eq!(
+			post_info.actual_weight.unwrap().ref_time(),
+			call_info.call_weight.ref_time() + 3,
+		);
+		assert_eq!(
+			post_info.actual_weight.unwrap().proof_size(),
+			// LEN is accounted as part of the base extrinsic, not in the post info actual weight.
+			actual_used_proof_size as u64,
+		);
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			initial_storage_weight + actual_used_proof_size as u64 + LEN as u64
+		);
+	});
+}
+
+#[test]
+fn full_accrue() {
+	// Settings for the test:
+	let actual_used_proof_size = 400;
+	let check_weight = 100;
+	let storage_weight_reclaim = 100;
+	let mock_ext = 142;
+	let mock_ext_refund = 100;
+
+	// Test execution:
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(1, check_weight));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT
+		.with_borrow_mut(|v| *v = Weight::from_parts(1, storage_weight_reclaim));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(36, mock_ext));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(35, mock_ext_refund));
+
+	let initial_storage_weight = 1212u64;
+
+	let mut test_ext = setup_test_externalities(&[
+		initial_storage_weight as usize,
+		initial_storage_weight as usize + actual_used_proof_size,
+	]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(initial_storage_weight);
+
+		let extrinsic = new_extrinsic();
+		let call_info = extrinsic.function.get_dispatch_info();
+
+		let info = extrinsic.get_dispatch_info();
+		let post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+		// Assertions:
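+		// Expected extra ref time on top of the call weight is again 3 (see `full_basic_refund`);
+		// here the measured proof size (400) exceeds the benchmarked total, so the block weight
+		// accrues the extra usage while the post info keeps the benchmarked proof size.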
+		assert_eq!(
+			post_info.actual_weight.unwrap().ref_time(),
+			call_info.call_weight.ref_time() + 3,
+		);
+		assert_eq!(
+			post_info.actual_weight.unwrap().proof_size(),
+			info.total_weight().proof_size(), // The post info doesn't get the accrue.
+		);
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			initial_storage_weight + actual_used_proof_size as u64 + LEN as u64
+		);
+	});
+}
+
+#[test]
+fn bare_is_reclaimed() {
+	let mut test_ext = setup_test_externalities(&[]);
+	test_ext.execute_with(|| {
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(100, 100),
+			extension_weight: Weight::from_parts(100, 100),
+			class: DispatchClass::Normal,
+			pays_fee: Default::default(),
+		};
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(100, 100)),
+			pays_fee: Default::default(),
+		};
+		MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(10, 10));
+
+		frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+			current_weight
+				.set(Weight::from_parts(45, 45) + info.total_weight(), DispatchClass::Normal);
+		});
+
+		StorageWeightReclaim::<Test, MockExtensionWithRefund>::bare_post_dispatch(
+			&info,
+			&mut post_info,
+			0,
+			&Ok(()),
+		)
+		.expect("tx is valid");
+
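+		// Expected block weight: 45 (initial) + 200 (info total) - 100 (unspent per post_info)
+		// - 10 (mock ext refund) = 135 = 45 + 90, for both ref time and proof size.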
+		assert_eq!(
+			*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal),
+			Weight::from_parts(45 + 90, 45 + 90),
+		);
+	});
+}
+
+#[test]
+fn sets_to_node_storage_proof_if_higher() {
+	struct TestCfg {
+		initial_proof_size: u64,
+		post_dispatch_proof_size: u64,
+		mock_ext_proof_size: u64,
+		pre_dispatch_block_proof_size: u64,
+		assert_final_block_proof_size: u64,
+	}
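+	// In both cases the final BlockWeight proof size is set to the node-side measured proof size
+	// plus the extrinsic length whenever the runtime-side accounting would end up lower.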
+
+	let tests = vec![
+		// The storage proof reported by the proof recorder is higher than what is stored on
+		// the runtime side.
+		TestCfg {
+			initial_proof_size: 1000,
+			post_dispatch_proof_size: 1005,
+			mock_ext_proof_size: 0,
+			pre_dispatch_block_proof_size: 5,
+			// We expect that the storage weight was set to the node-side proof size (1005) +
+			// extrinsics length (150)
+			assert_final_block_proof_size: 1155,
+		},
+		// In this second scenario the proof size on the node side is only lower
+		// after reclaim happened.
+		TestCfg {
+			initial_proof_size: 175,
+			post_dispatch_proof_size: 180,
+			mock_ext_proof_size: 100,
+			pre_dispatch_block_proof_size: 85,
+			// After the pre_dispatch, the BlockWeight proof size will be
+			// 85 (initial) + 100 (benched) + 150 (tx length) = 335.
+			//
+			// We expect the storage weight to be set to the node-side proof weight:
+			// first 95 is reclaimed, which leaves a BlockWeight of 240.
+			// This is lower than 180 (node-side proof size) + 150 (length),
+			// so we expect it to be set to 330.
+			assert_final_block_proof_size: 330,
+		},
+	];
+
+	for test in tests {
+		let mut test_ext = setup_test_externalities(&[
+			test.initial_proof_size as usize,
+			test.post_dispatch_proof_size as usize,
+		]);
+
+		CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+		STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+		MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, test.mock_ext_proof_size));
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(test.pre_dispatch_block_proof_size);
+
+			let extrinsic = new_extrinsic();
+			let call_info = extrinsic.function.get_dispatch_info();
+			assert_eq!(call_info.call_weight.proof_size(), 0);
+
+			let info = extrinsic.get_dispatch_info();
+			let _post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+			assert_eq!(get_storage_weight().proof_size(), test.assert_final_block_proof_size);
+		})
+	}
+}
+
+#[test]
+fn test_pov_missing_from_node_reclaim() {
+	// Test scenario: after dispatch the pov size from node side is less than block weight.
+	// Ensure `pov_size_missing_from_node` is calculated correctly, and `ExtrinsicWeightReclaimed`
+	// is updated correctly.
+
+	// Proof size:
+	let bench_pre_dispatch_call = 220;
+	let bench_post_dispatch_actual = 90;
+	let len = 20; // Only one extrinsic in this scenario, so this is also the total extrinsics length.
+	let block_pre_dispatch = 100;
+	let missing_from_node = 50;
+	let node_diff = 70;
+
+	let node_pre_dispatch = block_pre_dispatch + missing_from_node;
+	let node_post_dispatch = node_pre_dispatch + node_diff;
+
+	// Initialize the test.
+	let mut test_ext =
+		setup_test_externalities(&[node_pre_dispatch as usize, node_post_dispatch as usize]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(block_pre_dispatch);
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(0, bench_pre_dispatch_call),
+			extension_weight: Weight::from_parts(0, 0),
+			..Default::default()
+		};
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(0, bench_post_dispatch_actual)),
+			..Default::default()
+		};
+
+		// Execute the transaction.
+		let tx_ext = StorageWeightReclaim::<Test, frame_system::CheckWeight<Test>>::new(
+			frame_system::CheckWeight::new(),
+		);
+		tx_ext
+			.test_run(ALICE_ORIGIN.clone().into(), CALL, &info, len as usize, 0, |_| Ok(post_info))
+			.expect("valid")
+			.expect("success");
+
+		// Assert the results.
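+		// Block weight ends at the node-side post-dispatch proof size plus the extrinsic length
+		// (220 + 20 = 240). The reclaimed proof size is the benchmarked call proof size minus the
+		// node-side diff and the proof size missing from the node (220 - 70 - 50 = 100).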
+		assert_eq!(
+			frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal).proof_size(),
+			node_post_dispatch + len,
+		);
+		assert_eq!(
+			frame_system::ExtrinsicWeightReclaimed::<Test>::get().proof_size(),
+			bench_pre_dispatch_call - node_diff - missing_from_node,
+		);
+	});
+}
+
+#[test]
+fn test_ref_time_weight_reclaim() {
+	// Test scenario: after dispatch, the ref time weight is refunded correctly.
+
+	// Ref time weight:
+	let bench_pre_dispatch_call = 145;
+	let bench_post_dispatch_actual = 104;
+	let bench_mock_ext_weight = 63;
+	let bench_mock_ext_refund = 22;
+	let len = 20; // Only one extrinsic in this scenario, so this is also the total extrinsics length.
+	let block_pre_dispatch = 121;
+	let node_pre_dispatch = 0;
+	let node_post_dispatch = 0;
+
+	// Initialize the test.
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(bench_mock_ext_weight, 0));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(bench_mock_ext_refund, 0));
+
+	let base_extrinsic = <<Test as frame_system::Config>::BlockWeights as Get<
+		frame_system::limits::BlockWeights,
+	>>::get()
+	.per_class
+	.get(DispatchClass::Normal)
+	.base_extrinsic;
+
+	let mut test_ext =
+		setup_test_externalities(&[node_pre_dispatch as usize, node_post_dispatch as usize]);
+
+	test_ext.execute_with(|| {
+		frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+			current_weight.set(Weight::from_parts(block_pre_dispatch, 0), DispatchClass::Normal);
+		});
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(bench_pre_dispatch_call, 0),
+			extension_weight: Weight::from_parts(bench_mock_ext_weight, 0),
+			..Default::default()
+		};
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(bench_post_dispatch_actual, 0)),
+			..Default::default()
+		};
+
+		type InnerTxExt = (frame_system::CheckWeight<Test>, MockExtensionWithRefund);
+		// Execute the transaction.
+		let tx_ext = StorageWeightReclaim::<Test, InnerTxExt>::new((
+			frame_system::CheckWeight::new(),
+			MockExtensionWithRefund,
+		));
+		tx_ext
+			.test_run(ALICE_ORIGIN.clone().into(), CALL, &info, len as usize, 0, |_| Ok(post_info))
+			.expect("valid transaction extension pipeline")
+			.expect("success");
+
+		// Assert the results.
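+		// The call weight is refunded down to the post-dispatch actual weight and the mock
+		// extension refunds part of its own weight; both refunds show up in
+		// `ExtrinsicWeightReclaimed`.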
+		assert_eq!(
+			frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal).ref_time(),
+			block_pre_dispatch +
+				base_extrinsic.ref_time() +
+				bench_post_dispatch_actual +
+				bench_mock_ext_weight -
+				bench_mock_ext_refund,
+		);
+		assert_eq!(
+			frame_system::ExtrinsicWeightReclaimed::<Test>::get().ref_time(),
+			bench_pre_dispatch_call - bench_post_dispatch_actual + bench_mock_ext_refund,
+		);
+	});
+}
+
+#[test]
+fn test_metadata() {
+	assert_eq!(
+		StorageWeightReclaim::<Test, frame_system::CheckWeight<Test>>::metadata()
+			.iter()
+			.map(|m| m.identifier)
+			.collect::<Vec<_>>(),
+		vec!["CheckWeight", "StorageWeightReclaim"]
+	);
+}
diff --git a/cumulus/pallets/weight-reclaim/src/weights.rs b/cumulus/pallets/weight-reclaim/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e651c8a783185b048f59e284ef957539a0221d2f
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/weights.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `fedora`, CPU: `13th Gen Intel(R) Core(TM) i7-1360P`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// ./target/release/parachain-template-node
+// benchmark
+// pallet
+// --pallet
+// cumulus-pallet-weight-reclaim
+// --chain
+// dev
+// --output
+// cumulus/pallets/weight-reclaim/src/weights.rs
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --extrinsic
+// *
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `cumulus_pallet_weight_reclaim`.
+pub trait WeightInfo {
+	fn storage_weight_reclaim() -> Weight;
+}
+
+/// Weights for `cumulus_pallet_weight_reclaim` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_247_000 picoseconds.
+		Weight::from_parts(2_466_000, 0)
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_247_000 picoseconds.
+		Weight::from_parts(2_466_000, 0)
+	}
+}
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
index 5ef0993f70a1ce33daa68ec23b474716e6bee956..43398eb8bd48095f08eb5fd075f1c9b941b784c1 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
@@ -16,7 +16,8 @@
 pub mod genesis;
 
 pub use bridge_hub_rococo_runtime::{
-	xcm_config::XcmConfig as BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue,
+	self as bridge_hub_rococo_runtime, xcm_config::XcmConfig as BridgeHubRococoXcmConfig,
+	EthereumBeaconClient, EthereumInboundQueue,
 	ExistentialDeposit as BridgeHubRococoExistentialDeposit,
 	RuntimeOrigin as BridgeHubRococoRuntimeOrigin,
 };
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
index 54bc395c86f094b22350d5940c29b0b2a32cd36f..f84d42cb29f8ecfe5821b5814a9f4dac34ab0246 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
@@ -50,6 +50,7 @@ mod imports {
 			AssetHubWestendParaPallet as AssetHubWestendPallet,
 		},
 		bridge_hub_rococo_emulated_chain::{
+			bridge_hub_rococo_runtime::bridge_to_ethereum_config::EthereumGatewayAddress,
 			genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoExistentialDeposit,
 			BridgeHubRococoParaPallet as BridgeHubRococoPallet, BridgeHubRococoRuntimeOrigin,
 			BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue,
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
index c72d5045ddc07c780b7fe673d373c2935d445120..6364ff9fe959288041e369bffb90d117b080b12e 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
@@ -20,8 +20,8 @@ use hex_literal::hex;
 use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender;
 use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode};
 use snowbridge_pallet_inbound_queue_fixtures::{
-	register_token::make_register_token_message, send_token::make_send_token_message,
-	send_token_to_penpal::make_send_token_to_penpal_message,
+	register_token::make_register_token_message, send_native_eth::make_send_native_eth_message,
+	send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message,
 };
 use snowbridge_pallet_system;
 use snowbridge_router_primitives::inbound::{
@@ -238,7 +238,7 @@ fn register_weth_token_from_ethereum_to_asset_hub() {
 /// Tests the registering of a token as an asset on AssetHub, and then subsequently sending
 /// a token from Ethereum to AssetHub.
 #[test]
-fn send_token_from_ethereum_to_asset_hub() {
+fn send_weth_token_from_ethereum_to_asset_hub() {
 	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND);
 
 	// Fund ethereum sovereign on AssetHub
@@ -278,7 +278,7 @@ fn send_token_from_ethereum_to_asset_hub() {
 /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is
 /// still located on AssetHub.
 #[test]
-fn send_token_from_ethereum_to_penpal() {
+fn send_weth_from_ethereum_to_penpal() {
 	let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new(
 		1,
 		[Parachain(AssetHubRococo::para_id().into())],
@@ -515,6 +515,176 @@ fn send_weth_asset_from_asset_hub_to_ethereum() {
 	});
 }
 
+/// Tests the full cycle of eth transfers:
+/// - sending a token to AssetHub
+/// - returning the token to Ethereum
+#[test]
+fn send_eth_asset_from_asset_hub_to_ethereum_and_back() {
+	let ethereum_network: NetworkId = EthereumNetwork::get().into();
+	let origin_location = (Parent, Parent, ethereum_network).into();
+
+	use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
+	let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id());
+	let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location);
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+
+	AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION));
+	BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION));
+	AssetHubRococo::force_xcm_version(origin_location.clone(), XCM_VERSION);
+
+	BridgeHubRococo::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]);
+	AssetHubRococo::fund_accounts(vec![
+		(AssetHubRococoReceiver::get(), INITIAL_FUND),
+		(ethereum_sovereign.clone(), INITIAL_FUND),
+	]);
+
+	// Register ETH
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubRococo as Chain>::RuntimeOrigin;
+		assert_ok!(<AssetHubRococo as AssetHubRococoPallet>::ForeignAssets::force_create(
+			RuntimeOrigin::root(),
+			origin_location.clone(),
+			ethereum_sovereign.into(),
+			true,
+			1000,
+		));
+
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::ForceCreated { .. }) => {},
+			]
+		);
+	});
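+	// 1 ETH expressed in wei (10^18).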
+	const ETH_AMOUNT: u128 = 1_000_000_000_000_000_000;
+
+	BridgeHubRococo::execute_with(|| {
+		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <BridgeHubRococo as Chain>::RuntimeOrigin;
+
+		// Set the gateway. This is needed because new fixtures use a different gateway address.
+		assert_ok!(<BridgeHubRococo as Chain>::System::set_storage(
+			RuntimeOrigin::root(),
+			vec![(
+				EthereumGatewayAddress::key().to_vec(),
+				sp_core::H160(hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d")).encode(),
+			)],
+		));
+
+		// Construct the SendToken message and send it to the inbound queue
+		assert_ok!(send_inbound_message(make_send_native_eth_message()));
+
+		// Check that the send token message was sent using xcm
+		assert_expected_events!(
+			BridgeHubRococo,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubRococo as Chain>::RuntimeOrigin;
+
+		let _issued_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued {
+			asset_id: origin_location.clone(),
+			owner: AssetHubRococoReceiver::get().into(),
+			amount: ETH_AMOUNT,
+		});
+		// Check that AssetHub has issued the foreign asset
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				_issued_event => {},
+			]
+		);
+		let assets =
+			vec![Asset { id: AssetId(origin_location.clone()), fun: Fungible(ETH_AMOUNT) }];
+		let multi_assets = VersionedAssets::from(Assets::from(assets));
+
+		let destination = origin_location.clone().into();
+
+		let beneficiary = VersionedLocation::from(Location::new(
+			0,
+			[AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+		));
+
+		let free_balance_before = <AssetHubRococo as AssetHubRococoPallet>::Balances::free_balance(
+			AssetHubRococoReceiver::get(),
+		);
+		// Send the ETH back to Ethereum
+		<AssetHubRococo as AssetHubRococoPallet>::PolkadotXcm::limited_reserve_transfer_assets(
+			RuntimeOrigin::signed(AssetHubRococoReceiver::get()),
+			Box::new(destination),
+			Box::new(beneficiary),
+			Box::new(multi_assets),
+			0,
+			Unlimited,
+		)
+		.unwrap();
+
+		let _burned_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned {
+			asset_id: origin_location.clone(),
+			owner: AssetHubRococoReceiver::get().into(),
+			balance: ETH_AMOUNT,
+		});
+		// Check that AssetHub has burned the foreign asset and sent the XCM message
+		let _destination = origin_location.clone();
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				_burned_event => {},
+				RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent {
+					destination: _destination, ..
+				}) => {},
+			]
+		);
+
+		let free_balance_after = <AssetHubRococo as AssetHubRococoPallet>::Balances::free_balance(
+			AssetHubRococoReceiver::get(),
+		);
+		// Assert that at least DefaultBridgeHubEthereumBaseFee was charged from the sender
+		let free_balance_diff = free_balance_before - free_balance_after;
+		assert!(free_balance_diff > DefaultBridgeHubEthereumBaseFee::get());
+	});
+
+	BridgeHubRococo::execute_with(|| {
+		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
+		// Check that the "transfer token back to Ethereum" message was queued in the
+		// Ethereum Outbound Queue
+		assert_expected_events!(
+			BridgeHubRococo,
+			vec![
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageAccepted {..}) => {},
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued {..}) => {},
+			]
+		);
+
+		let events = BridgeHubRococo::events();
+		// Check that the local fee was credited to the Snowbridge sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount })
+					if *who == TREASURY_ACCOUNT.into() && *amount == 16903333
+			)),
+			"Snowbridge sovereign takes local fee."
+		);
+		// Check that the remote fee was credited to the AssetHub sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount })
+					if *who == assethub_sovereign && *amount == 2680000000000,
+			)),
+			"AssetHub sovereign takes remote fee."
+		);
+	});
+}
+
 #[test]
 fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() {
 	// Insufficient fund
@@ -565,7 +735,7 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() {
 	});
 }
 
-fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) {
+fn send_weth_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) {
 	let ethereum_network_v5: NetworkId = EthereumNetwork::get().into();
 	let weth_asset_location: Location =
 		Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]);
@@ -623,8 +793,8 @@ fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u12
 }
 
 #[test]
-fn send_token_from_ethereum_to_existent_account_on_asset_hub() {
-	send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE);
+fn send_weth_from_ethereum_to_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -640,8 +810,8 @@ fn send_token_from_ethereum_to_existent_account_on_asset_hub() {
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() {
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -657,8 +827,8 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() {
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -675,10 +845,10 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficie
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
 ) {
 	// On AH the xcm fee is 26_789_690 and the ED is 3_300_000
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000);
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index 8d904b1de55faaeb970afe74f346b27af6e543ef..d612dd03c247a101049de02c4bf7822c73302783 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -82,11 +82,11 @@ assets-common = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -114,6 +114,7 @@ runtime-benchmarks = [
 	"assets-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -154,6 +155,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -196,11 +198,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 78c09f7ac9e94dd705955f253911284cc561aba1..3261806d6f362fc832e82822fe7d6ffeab9eef51 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -185,6 +185,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -1062,6 +1066,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -1119,18 +1124,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -1315,6 +1322,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1968,20 +1976,8 @@ impl_runtime_apis! {
 
 			type ToWestend = XcmBridgeHubRouterBench<Runtime, ToWestendXcmRouterInstance>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-				//TODO: use from relay_well_known_keys::ACTIVE_CONFIG
-				hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c8f9bb7cd56c3c2d56224aa8043e3817901c18c4
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=asset-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_301_000 picoseconds.
+		Weight::from_parts(7_536_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
index 182410f20fffe19f74f887fa7858c7072d855172..a5c9fea3cdf53f3daacd3fb1a223056729d01fcf 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
@@ -16,28 +16,29 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/asset-hub-rococo-runtime/asset_hub_rococo_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/
-// --chain=asset-hub-rococo-dev
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +57,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_637_000 picoseconds.
-		Weight::from_parts(6_382_000, 0)
+		// Minimum execution time: 8_313_000 picoseconds.
+		Weight::from_parts(8_528_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -67,8 +68,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_841_000 picoseconds.
-		Weight::from_parts(8_776_000, 0)
+		// Minimum execution time: 12_527_000 picoseconds.
+		Weight::from_parts(13_006_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -78,8 +79,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_841_000 picoseconds.
-		Weight::from_parts(8_776_000, 0)
+		// Minimum execution time: 12_380_000 picoseconds.
+		Weight::from_parts(12_922_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +88,64 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 561_000 picoseconds.
-		Weight::from_parts(2_705_000, 0)
+		// Minimum execution time: 782_000 picoseconds.
+		Weight::from_parts(855_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_316_000 picoseconds.
-		Weight::from_parts(5_771_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 11_743_000 picoseconds.
+		Weight::from_parts(12_067_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 511_000 picoseconds.
-		Weight::from_parts(2_575_000, 0)
+		// Minimum execution time: 644_000 picoseconds.
+		Weight::from_parts(697_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_595_000, 0)
+		// Minimum execution time: 605_000 picoseconds.
+		Weight::from_parts(700_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_687_000 picoseconds.
-		Weight::from_parts(6_192_000, 0)
+		// Minimum execution time: 9_796_000 picoseconds.
+		Weight::from_parts(10_365_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 4_855_000 picoseconds.
+		Weight::from_parts(5_050_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
index 3f37eefc32d9dd51037d3cb6c5aa185d136ef07a..6893766ac72d2113f993e54d3b28e5e136acac3d 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
@@ -16,6 +16,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index cb40f79579f923c64ad6689d26b0213d08a8e9ab..65ef63a7fb356c1b2c20a9fcddbc4bdc3d3b8bff 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -83,11 +83,11 @@ assets-common = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 pallet-message-queue = { workspace = true }
@@ -116,6 +116,7 @@ runtime-benchmarks = [
 	"assets-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -158,6 +159,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -202,11 +204,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 36ab3fb01ceced4b1153e8fcfd09530849adb375..7bc9dfefb754d42d2a48647d26bb8b16a3be3969 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -187,6 +187,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -1082,6 +1086,7 @@ impl pallet_revive::Config for Runtime {
 	type Xcm = pallet_xcm::Pallet<Self>;
 	type ChainId = ConstU64<420_420_421>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
+	type EthGasEncoder = ();
 }
 
 impl TryFrom<RuntimeCall> for pallet_revive::Call<Runtime> {
@@ -1105,6 +1110,7 @@ construct_runtime!(
 		// RandomnessCollectiveFlip = 2 removed
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -1165,18 +1171,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Default extensions applied to Ethereum transactions.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -1196,9 +1204,9 @@ impl EthExtra for EthExtraImpl {
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None),
-			cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<Runtime>::new(),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
 		)
+			.into()
 	}
 }
 
@@ -1446,6 +1454,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -2145,20 +2154,8 @@ impl_runtime_apis! {
 
 			type ToRococo = XcmBridgeHubRouterBench<Runtime, ToRococoXcmRouterInstance>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-				//TODO: use from relay_well_known_keys::ACTIVE_CONFIG
-				hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
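Note on the extension shape in the hunks above: `StorageWeightReclaim` no longer sits as one element inside the extension tuple; the version from `cumulus_pallet_weight_reclaim` wraps the whole tuple, and a concrete value is obtained by building the inner tuple and converting it with `.into()` (as `EthExtraImpl` now does). A minimal sketch under that assumption, using the same extension types as this runtime; the helper name `tx_extension_for` is illustrative only, not part of the runtime:

// Hedged sketch: assembling a wrapped TxExtension value after this change.
// `tx_extension_for` is an illustrative helper; types follow this runtime's Nonce/Balance.
fn tx_extension_for(nonce: u32, tip: Balance) -> TxExtension {
	(
		frame_system::CheckNonZeroSender::<Runtime>::new(),
		frame_system::CheckSpecVersion::<Runtime>::new(),
		frame_system::CheckTxVersion::<Runtime>::new(),
		frame_system::CheckGenesis::<Runtime>::new(),
		frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::Immortal),
		frame_system::CheckNonce::<Runtime>::from(nonce),
		frame_system::CheckWeight::<Runtime>::new(),
		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None),
		frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
	)
		// `From`/`Into` lifts the inner tuple into
		// `cumulus_pallet_weight_reclaim::StorageWeightReclaim<Runtime, (...)>`.
		.into()
}
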
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1573a278e24674344714b1d466e47e723fbaca7b
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=asset-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_470_000 picoseconds.
+		Weight::from_parts(7_695_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
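For orientation, the autogenerated module above is only effective once the runtime references it; this diff adds that wiring in two places, condensed into one hedged sketch below (type paths exactly as used by this runtime):

// Hedged sketch combining the two wiring points added for the new pallet.

// 1) The runtime points the pallet at the autogenerated weights:
impl cumulus_pallet_weight_reclaim::Config for Runtime {
	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
}

// 2) The pallet is registered in the benchmark list (inside `mod benches`),
//    so the `benchmark pallet` command can (re)generate this file:
frame_benchmarking::define_benchmarks!(
	[cumulus_pallet_weight_reclaim, WeightReclaim]
);
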
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
index e8dd9763c28261c19928d05719ccd3a4f5492416..a1bb92cf7008b99030421ca5fc81e1c5fda6faa8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,29 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/
-// --chain=asset-hub-westend-dev
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +57,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_206_000 picoseconds.
-		Weight::from_parts(6_212_000, 0)
+		// Minimum execution time: 6_329_000 picoseconds.
+		Weight::from_parts(6_665_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -67,8 +68,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_851_000 picoseconds.
-		Weight::from_parts(8_847_000, 0)
+		// Minimum execution time: 12_110_000 picoseconds.
+		Weight::from_parts(12_883_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -78,8 +79,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_851_000 picoseconds.
-		Weight::from_parts(8_847_000, 0)
+		// Minimum execution time: 12_241_000 picoseconds.
+		Weight::from_parts(12_780_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +88,64 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 631_000 picoseconds.
-		Weight::from_parts(3_086_000, 0)
+		// Minimum execution time: 825_000 picoseconds.
+		Weight::from_parts(890_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_446_000 picoseconds.
-		Weight::from_parts(5_911_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 10_159_000 picoseconds.
+		Weight::from_parts(10_461_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 481_000 picoseconds.
-		Weight::from_parts(2_916_000, 0)
+		// Minimum execution time: 578_000 picoseconds.
+		Weight::from_parts(660_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_595_000, 0)
+		// Minimum execution time: 618_000 picoseconds.
+		Weight::from_parts(682_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_927_000 picoseconds.
-		Weight::from_parts(6_613_000, 0)
+		// Minimum execution time: 9_964_000 picoseconds.
+		Weight::from_parts(10_419_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 4_890_000 picoseconds.
+		Weight::from_parts(5_163_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
index 3ea7b02a3024784a4acc3fb87a222a26de32d7a2..d653838ad80e61d9dd96a705a1871fe4c4858fa8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
@@ -15,6 +15,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 3fabea3b02f4f6f7904192055a82202b0416bc84..3dba65ae99f183581710736581d45017b4a79c59 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -72,11 +72,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -151,11 +151,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking/std",
 	"frame-executive/std",
@@ -230,6 +230,7 @@ runtime-benchmarks = [
 	"bridge-runtime-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -272,6 +273,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 492b731610ce7cf5c54d9505cf97ee9e4cb3f7cb..35af034310d9202337f38984e8c92c8480bf8a9a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -124,20 +124,22 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	BridgeRejectObsoleteHeadersAndMessages,
-	(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,),
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		BridgeRejectObsoleteHeadersAndMessages,
+		(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,),
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -313,6 +315,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -555,6 +561,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -667,6 +674,7 @@ mod benches {
 		[pallet_collator_selection, CollatorSelection]
 		[cumulus_pallet_parachain_system, ParachainSystem]
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 		// XCM
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		// NOTE: Make sure you point to the individual modules below.
@@ -1498,18 +1506,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
@@ -1557,41 +1555,44 @@ mod tests {
 		use bp_polkadot_core::SuffixedCommonTransactionExtensionExt;
 
 		sp_io::TestExternalities::default().execute_with(|| {
-            frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
-            let payload: TxExtension = (
-                frame_system::CheckNonZeroSender::new(),
-                frame_system::CheckSpecVersion::new(),
-                frame_system::CheckTxVersion::new(),
-                frame_system::CheckGenesis::new(),
-                frame_system::CheckEra::from(Era::Immortal),
-                frame_system::CheckNonce::from(10),
-                frame_system::CheckWeight::new(),
-                pallet_transaction_payment::ChargeTransactionPayment::from(10),
-                BridgeRejectObsoleteHeadersAndMessages,
-                (
-                    bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
-                ),
-                frame_metadata_hash_extension::CheckMetadataHash::new(false),
-				cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-            );
-
-            // for BridgeHubRococo
-            {
-                let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params(
-                    VERSION.spec_version,
-                    VERSION.transaction_version,
-                    bp_runtime::TransactionEra::Immortal,
-                    System::block_hash(BlockNumber::zero()),
-                    10,
-                    10,
-                    (((), ()), ((), ())),
-                );
-                assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode());
-                assert_eq!(
-                    TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
-                    sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(&bhr_indirect_payload).unwrap().encode()
-                )
-            }
-        });
+			frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
+			let payload: TxExtension = (
+				frame_system::CheckNonZeroSender::new(),
+				frame_system::CheckSpecVersion::new(),
+				frame_system::CheckTxVersion::new(),
+				frame_system::CheckGenesis::new(),
+				frame_system::CheckEra::from(Era::Immortal),
+				frame_system::CheckNonce::from(10),
+				frame_system::CheckWeight::new(),
+				pallet_transaction_payment::ChargeTransactionPayment::from(10),
+				BridgeRejectObsoleteHeadersAndMessages,
+				(
+					bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
+				),
+				frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			).into();
+
+			// for BridgeHubRococo
+			{
+				let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params(
+					VERSION.spec_version,
+					VERSION.transaction_version,
+					bp_runtime::TransactionEra::Immortal,
+					System::block_hash(BlockNumber::zero()),
+					10,
+					10,
+					(((), ()), ((), ())),
+				);
+				assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode());
+				assert_eq!(
+					TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
+					sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(
+						&bhr_indirect_payload
+					)
+					.unwrap()
+					.encode()
+				)
+			}
+		});
 	}
 }
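On the whitelist change above: instead of hardcoding hashed storage keys, `dispatch_benchmark` now collects every key the pallets themselves declare as whitelisted. A minimal sketch of the pattern; the storage item below is illustrative, and `#[pallet::whitelist_storage]` is assumed to be the FRAME attribute that feeds `whitelisted_storage_keys()`:

// Hedged sketch: how keys end up in the whitelist the runtime now gathers.
// Inside a pallet definition, a frequently touched storage item can be marked
// so the benchmarking machinery ignores its DB accesses (illustrative names):
#[pallet::storage]
#[pallet::whitelist_storage]
pub type HotCounter<T> = StorageValue<_, u32, ValueQuery>;

// In `dispatch_benchmark`, the runtime then aggregates all such keys,
// replacing the previous hardcoded hex list:
use frame_support::traits::WhitelistedStorageKeys;
let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
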
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca1d8dcbe56782dba07ea4a2bae3a4978394a1f5
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=bridge-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_988_000 picoseconds.
+		Weight::from_parts(7_361_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
index 64eef1b4f7405abe40327241fe538ed1373edeab..93fb6f3bbbe30706eb278000a66c7e48568cf9e5 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=bridge-hub-rococo-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
-// --chain=bridge-hub-rococo-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_136_000 picoseconds.
-		Weight::from_parts(5_842_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_211_000 picoseconds.
+		Weight::from_parts(4_470_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_771_000 picoseconds.
-		Weight::from_parts(8_857_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_792_000 picoseconds.
+		Weight::from_parts(9_026_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_771_000 picoseconds.
-		Weight::from_parts(8_857_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_700_000 picoseconds.
+		Weight::from_parts(9_142_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 732_000 picoseconds.
-		Weight::from_parts(2_875_000, 0)
+		// Minimum execution time: 487_000 picoseconds.
+		Weight::from_parts(534_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_627_000 picoseconds.
-		Weight::from_parts(6_322_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_719_000 picoseconds.
+		Weight::from_parts(6_846_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(2_455_000, 0)
+		// Minimum execution time: 410_000 picoseconds.
+		Weight::from_parts(442_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(2_916_000, 0)
+		// Minimum execution time: 390_000 picoseconds.
+		Weight::from_parts(425_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_798_000 picoseconds.
-		Weight::from_parts(6_272_000, 0)
+		// Minimum execution time: 5_965_000 picoseconds.
+		Weight::from_parts(6_291_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_738_000 picoseconds.
+		Weight::from_parts(2_915_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
index 74796e626a2ec24b9eec32e5a336819fdb6bc507..7a0accf2e7a4530c291e02fcdb742e834bd2e7c9 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
@@ -24,6 +24,7 @@ use ::pallet_bridge_relayers::WeightInfo as _;
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
index d5baa1c71dfdc7d687dbbaa573be2b05a0c59e9e..c40aae5a82a9035c787f391ebc712d356e56afb5 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
@@ -184,8 +184,8 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-	);
+	)
+		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
 	let signature = payload.using_encoded(|e| sender.sign(e));
 	UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext)
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index 8d74b221a609f21f879faf2b6f6577ac22ae9b9f..b0f4366e29cf057b7ed6bc6b4c8a97fc46249456 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -63,7 +63,6 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
 	)
 		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index 644aa72d131162510a2fb34d4ad1ed195eafef5e..444023eac722e1b7087a789ec68856f0c9cd812c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -72,11 +72,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 
 pallet-collator-selection = { workspace = true }
@@ -148,11 +148,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking/std",
 	"frame-executive/std",
@@ -227,6 +227,7 @@ runtime-benchmarks = [
 	"bridge-runtime-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -269,6 +270,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index edf79ea0c3152a5eae5da4bc8c085e36c535c2f5..2c2e01b4d21dc7160e73b09d3612da05a594af87 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -120,20 +120,22 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	BridgeRejectObsoleteHeadersAndMessages,
-	(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		BridgeRejectObsoleteHeadersAndMessages,
+		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -299,6 +301,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -532,6 +538,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -622,6 +629,7 @@ mod benches {
 		[snowbridge_pallet_outbound_queue, EthereumOutboundQueue]
 		[snowbridge_pallet_system, EthereumSystem]
 		[snowbridge_pallet_ethereum_client, EthereumBeaconClient]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1315,18 +1323,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
@@ -1379,40 +1377,43 @@ mod tests {
 		use bp_polkadot_core::SuffixedCommonTransactionExtensionExt;
 
 		sp_io::TestExternalities::default().execute_with(|| {
-            frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
-            let payload: TxExtension = (
-                frame_system::CheckNonZeroSender::new(),
-                frame_system::CheckSpecVersion::new(),
-                frame_system::CheckTxVersion::new(),
-                frame_system::CheckGenesis::new(),
-                frame_system::CheckEra::from(Era::Immortal),
-                frame_system::CheckNonce::from(10),
-                frame_system::CheckWeight::new(),
-                pallet_transaction_payment::ChargeTransactionPayment::from(10),
-                BridgeRejectObsoleteHeadersAndMessages,
-                (
-                    bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),
-                ),
+			frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
+			let payload: TxExtension = (
+				frame_system::CheckNonZeroSender::new(),
+				frame_system::CheckSpecVersion::new(),
+				frame_system::CheckTxVersion::new(),
+				frame_system::CheckGenesis::new(),
+				frame_system::CheckEra::from(Era::Immortal),
+				frame_system::CheckNonce::from(10),
+				frame_system::CheckWeight::new(),
+				pallet_transaction_payment::ChargeTransactionPayment::from(10),
+				BridgeRejectObsoleteHeadersAndMessages,
+				(
+					bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),
+				),
 				frame_metadata_hash_extension::CheckMetadataHash::new(false),
-                cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-            );
-
-            {
-                let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params(
-                    VERSION.spec_version,
-                    VERSION.transaction_version,
-                    bp_runtime::TransactionEra::Immortal,
-                    System::block_hash(BlockNumber::zero()),
-                    10,
-                    10,
-                    (((), ()), ((), ())),
-                );
-                assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode());
-                assert_eq!(
-                    TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
-                    sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(&bh_indirect_payload).unwrap().encode()
-                )
-            }
-        });
+			).into();
+
+			{
+				let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params(
+					VERSION.spec_version,
+					VERSION.transaction_version,
+					bp_runtime::TransactionEra::Immortal,
+					System::block_hash(BlockNumber::zero()),
+					10,
+					10,
+					(((), ()), ((), ())),
+				);
+				assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode());
+				assert_eq!(
+					TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
+					sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(
+						&bh_indirect_payload
+					)
+					.unwrap()
+					.encode()
+				)
+			}
+		});
 	}
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..955b273254562376b7cdcb0c4175648a4de935f2
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=bridge-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_810_000 picoseconds.
+		Weight::from_parts(7_250_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
index 459b137d3b8419765362e4501db525db31135a46..21cadac25e161565c43e03efc9e0684db1c28bc4 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=bridge-hub-westend-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/
-// --chain=bridge-hub-westend-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_166_000 picoseconds.
-		Weight::from_parts(6_021_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_363_000 picoseconds.
+		Weight::from_parts(4_521_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_651_000 picoseconds.
-		Weight::from_parts(9_177_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_522_000 picoseconds.
+		Weight::from_parts(8_847_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_651_000 picoseconds.
-		Weight::from_parts(9_177_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_617_000 picoseconds.
+		Weight::from_parts(8_789_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 601_000 picoseconds.
-		Weight::from_parts(2_805_000, 0)
+		// Minimum execution time: 485_000 picoseconds.
+		Weight::from_parts(557_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_727_000 picoseconds.
-		Weight::from_parts(6_051_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_682_000 picoseconds.
+		Weight::from_parts(6_821_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(2_494_000, 0)
+		// Minimum execution time: 390_000 picoseconds.
+		Weight::from_parts(441_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 521_000 picoseconds.
-		Weight::from_parts(2_655_000, 0)
+		// Minimum execution time: 395_000 picoseconds.
+		Weight::from_parts(455_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_808_000 picoseconds.
-		Weight::from_parts(6_402_000, 0)
+		// Minimum execution time: 6_134_000 picoseconds.
+		Weight::from_parts(6_308_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_764_000 picoseconds.
+		Weight::from_parts(2_893_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
index c1c5c337aca8900e7b6314b0e1aa56ed9a5fdec2..313da55831c8f2446ec570f5c78432ebc1ddca35 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
@@ -24,6 +24,7 @@ use ::pallet_bridge_relayers::WeightInfo as _;
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
index d71400fa71b625467144d3f793be1604d95ca146..bc570ef7f74bc58495002cb49507304b928a38d6 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
@@ -185,8 +185,8 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-	);
+	)
+		.into();
 	let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
 	let signature = payload.using_encoded(|e| sender.sign(e));
 	UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra)
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
index 9d32f28f4fc6c69e96ec37d36c47e7a976be9357..d7e7fbe0c72e56f807c2d488d421519086171a77 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
@@ -95,7 +95,6 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
 	)
 		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 9c70b65060dd93a2dee8d8f8fa2f849ef1ef330c..2786321e48e2edcc9a975a5ec6f159c0bef41ad5 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -77,11 +77,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 
@@ -103,6 +103,7 @@ default = ["std"]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -143,6 +144,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -182,11 +184,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index 5c2ba2e24c22c9f41a2e0d627df071006c9606a3..e9adc4d1eae74fe0098a5de59889c10b7a101867 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -191,6 +191,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -669,6 +673,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -735,16 +740,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+	),
+>;
+
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -806,6 +814,7 @@ mod benches {
 		[pallet_salary, AmbassadorSalary]
 		[pallet_treasury, FellowshipTreasury]
 		[pallet_asset_rate, AssetRate]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1139,18 +1148,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c286ba132022791157e6bd9861375c62156edf61
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_745_000 picoseconds.
+		Weight::from_parts(6_948_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
index f32f2730313570cad4a759e64b2156f30110f60b..8c2abcd4e8c88fa2e888eb3caca38b57f9bac5eb 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=collectives-westend-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/
-// --chain=collectives-westend-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_497_000 picoseconds.
-		Weight::from_parts(5_961_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_206_000 picoseconds.
+		Weight::from_parts(4_485_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_240_000 picoseconds.
-		Weight::from_parts(8_175_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 7_537_000 picoseconds.
+		Weight::from_parts(7_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_240_000 picoseconds.
-		Weight::from_parts(8_175_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 7_512_000 picoseconds.
+		Weight::from_parts(7_655_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 671_000 picoseconds.
-		Weight::from_parts(3_005_000, 0)
+		// Minimum execution time: 447_000 picoseconds.
+		Weight::from_parts(499_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_426_000 picoseconds.
-		Weight::from_parts(6_131_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_667_000 picoseconds.
+		Weight::from_parts(6_868_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_715_000, 0)
+		// Minimum execution time: 389_000 picoseconds.
+		Weight::from_parts(420_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(2_635_000, 0)
+		// Minimum execution time: 379_000 picoseconds.
+		Weight::from_parts(420_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_958_000 picoseconds.
-		Weight::from_parts(6_753_000, 0)
+		// Minimum execution time: 6_330_000 picoseconds.
+		Weight::from_parts(6_605_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_784_000 picoseconds.
+		Weight::from_parts(2_960_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
index 00b3bd92d5ef9ce0081ca9e95e43bd35098c6aa0..ce85d23b21cbb6c5b66b61d4f90705bf7ad07ab4 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
@@ -15,6 +15,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
@@ -46,6 +47,7 @@ pub mod pallet_utility;
 pub mod pallet_xcm;
 pub mod paritydb_weights;
 pub mod rocksdb_weights;
+pub mod xcm;
 
 pub use block_weights::constants::BlockExecutionWeight;
 pub use extrinsic_weights::constants::ExtrinsicBaseWeight;
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
index 6bedfcc7e012383657a059b87942517644057d8a..4d092ec80313bc8fbd7d6efee143342f8ea19580 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
@@ -1,4 +1,4 @@
-// Copyright Parity Technologies (UK) Ltd.
+// Copyright (C) Parity Technologies (UK) Ltd.
 // This file is part of Cumulus.
 
 // Cumulus is free software: you can redistribute it and/or modify
@@ -16,25 +16,29 @@
 
 //! Autogenerated weights for `pallet_core_fellowship`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `623e9e4b814e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_core_fellowship
 // --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --runtime=target/production/wbuild/collectives-westend-runtime/collectives_westend_runtime.wasm
+// --pallet=pallet_core_fellowship
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -48,25 +52,26 @@ use core::marker::PhantomData;
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> {
 	/// Storage: `AmbassadorCore::Params` (r:0 w:1)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_params() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(11_000_000, 0)
+		// Minimum execution time: 9_131_000 picoseconds.
+		Weight::from_parts(9_371_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `AmbassadorCore::Params` (r:0 w:1)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Params` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_partial_params() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(11_000_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `471`
+		//  Estimated: `1853`
+		// Minimum execution time: 18_375_000 picoseconds.
+		Weight::from_parts(18_872_000, 0)
+			.saturating_add(Weight::from_parts(0, 1853))
+			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
@@ -74,44 +79,48 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66011`
+		//  Measured:  `66402`
 		//  Estimated: `69046`
-		// Minimum execution time: 96_000_000 picoseconds.
-		Weight::from_parts(111_000_000, 0)
+		// Minimum execution time: 156_752_000 picoseconds.
+		Weight::from_parts(164_242_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
 	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_demote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66121`
+		//  Measured:  `66512`
 		//  Estimated: `69046`
-		// Minimum execution time: 99_000_000 picoseconds.
-		Weight::from_parts(116_000_000, 0)
+		// Minimum execution time: 158_877_000 picoseconds.
+		Weight::from_parts(165_228_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -121,8 +130,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `360`
 		//  Estimated: `3514`
-		// Minimum execution time: 21_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 25_056_000 picoseconds.
+		Weight::from_parts(26_028_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -141,8 +150,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `118`
 		//  Estimated: `3514`
-		// Minimum execution time: 36_000_000 picoseconds.
-		Weight::from_parts(36_000_000, 0)
+		// Minimum execution time: 34_784_000 picoseconds.
+		Weight::from_parts(35_970_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -152,7 +161,7 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
 	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
@@ -163,25 +172,40 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn promote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `65989`
+		//  Measured:  `66055`
 		//  Estimated: `69046`
-		// Minimum execution time: 95_000_000 picoseconds.
-		Weight::from_parts(110_000_000, 0)
+		// Minimum execution time: 147_616_000 picoseconds.
+		Weight::from_parts(154_534_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(6))
 	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:9 w:9)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:9)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:9)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// The range of component `r` is `[1, 9]`.
+	/// The range of component `r` is `[1, 9]`.
 	fn promote_fast(r: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `16844`
-		//  Estimated: `19894 + r * (2489 ±0)`
-		// Minimum execution time: 45_065_000 picoseconds.
-		Weight::from_parts(34_090_392, 19894)
-			// Standard Error: 18_620
-			.saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
+		//  Measured:  `65968`
+		//  Estimated: `69046 + r * (2489 ±0)`
+		// Minimum execution time: 138_323_000 picoseconds.
+		Weight::from_parts(125_497_264, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			// Standard Error: 56_050
+			.saturating_add(Weight::from_parts(19_863_853, 0).saturating_mul(r.into()))
+			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
+			.saturating_add(T::DbWeight::get().writes(3))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
@@ -193,10 +217,10 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
 	fn offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `331`
+		//  Measured:  `265`
 		//  Estimated: `3514`
-		// Minimum execution time: 21_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 26_903_000 picoseconds.
+		Weight::from_parts(27_645_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -209,8 +233,22 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `285`
 		//  Estimated: `3514`
-		// Minimum execution time: 20_000_000 picoseconds.
-		Weight::from_parts(21_000_000, 0)
+		// Minimum execution time: 23_286_000 picoseconds.
+		Weight::from_parts(23_848_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -225,8 +263,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `65967`
 		//  Estimated: `69046`
-		// Minimum execution time: 78_000_000 picoseconds.
-		Weight::from_parts(104_000_000, 0)
+		// Minimum execution time: 125_987_000 picoseconds.
+		Weight::from_parts(130_625_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -239,8 +277,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `151`
 		//  Estimated: `69046`
-		// Minimum execution time: 43_000_000 picoseconds.
-		Weight::from_parts(44_000_000, 0)
+		// Minimum execution time: 104_431_000 picoseconds.
+		Weight::from_parts(106_646_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
index 05014e273f0009bf212969ba8705879747eedd75..acb1f82985db700ffe4d6f49bf15d85af50fd15a 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
@@ -1,39 +1,44 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_core_fellowship`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `623e9e4b814e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_core_fellowship
 // --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --runtime=target/production/wbuild/collectives-westend-runtime/collectives_westend_runtime.wasm
+// --pallet=pallet_core_fellowship
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,25 +52,26 @@ use core::marker::PhantomData;
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> {
 	/// Storage: `FellowshipCore::Params` (r:0 w:1)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_params() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(12_000_000, 0)
+		// Minimum execution time: 9_115_000 picoseconds.
+		Weight::from_parts(9_523_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `FellowshipCore::Params` (r:0 w:1)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::Params` (r:1 w:1)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_partial_params() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(12_000_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `504`
+		//  Estimated: `1853`
+		// Minimum execution time: 18_294_000 picoseconds.
+		Weight::from_parts(18_942_000, 0)
+			.saturating_add(Weight::from_parts(0, 1853))
+			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
@@ -73,44 +79,48 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:1)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66144`
+		//  Measured:  `66535`
 		//  Estimated: `69046`
-		// Minimum execution time: 109_000_000 picoseconds.
-		Weight::from_parts(125_000_000, 0)
+		// Minimum execution time: 152_823_000 picoseconds.
+		Weight::from_parts(158_737_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
 	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:1)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_demote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66254`
+		//  Measured:  `66645`
 		//  Estimated: `69046`
-		// Minimum execution time: 112_000_000 picoseconds.
-		Weight::from_parts(114_000_000, 0)
+		// Minimum execution time: 157_605_000 picoseconds.
+		Weight::from_parts(162_341_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `FellowshipCollective::Members` (r:1 w:0)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -120,8 +130,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `493`
 		//  Estimated: `3514`
-		// Minimum execution time: 22_000_000 picoseconds.
-		Weight::from_parts(27_000_000, 0)
+		// Minimum execution time: 25_194_000 picoseconds.
+		Weight::from_parts(26_262_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -140,8 +150,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `251`
 		//  Estimated: `3514`
-		// Minimum execution time: 35_000_000 picoseconds.
-		Weight::from_parts(36_000_000, 0)
+		// Minimum execution time: 35_479_000 picoseconds.
+		Weight::from_parts(36_360_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -151,7 +161,7 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
 	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
@@ -162,25 +172,40 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn promote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66122`
+		//  Measured:  `66188`
 		//  Estimated: `69046`
-		// Minimum execution time: 97_000_000 picoseconds.
-		Weight::from_parts(129_000_000, 0)
+		// Minimum execution time: 147_993_000 picoseconds.
+		Weight::from_parts(153_943_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(6))
 	}
+	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
+	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::Member` (r:1 w:1)
+	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::MemberCount` (r:9 w:9)
+	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:9)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:0 w:9)
+	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// The range of component `r` is `[1, 9]`.
+	/// The range of component `r` is `[1, 9]`.
 	fn promote_fast(r: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `16844`
-		//  Estimated: `19894 + r * (2489 ±0)`
-		// Minimum execution time: 45_065_000 picoseconds.
-		Weight::from_parts(34_090_392, 19894)
-			// Standard Error: 18_620
-			.saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
+		//  Measured:  `66101`
+		//  Estimated: `69046 + r * (2489 ±0)`
+		// Minimum execution time: 138_444_000 picoseconds.
+		Weight::from_parts(125_440_035, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			// Standard Error: 55_452
+			.saturating_add(Weight::from_parts(19_946_954, 0).saturating_mul(r.into()))
+			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
+			.saturating_add(T::DbWeight::get().writes(3))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
@@ -192,10 +217,10 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
 	fn offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `464`
+		//  Measured:  `398`
 		//  Estimated: `3514`
-		// Minimum execution time: 22_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 27_392_000 picoseconds.
+		Weight::from_parts(28_134_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -208,8 +233,22 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `418`
 		//  Estimated: `3514`
-		// Minimum execution time: 20_000_000 picoseconds.
-		Weight::from_parts(24_000_000, 0)
+		// Minimum execution time: 23_523_000 picoseconds.
+		Weight::from_parts(24_046_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `FellowshipCore::Member` (r:1 w:1)
+	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::Members` (r:1 w:0)
+	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `418`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_369_000 picoseconds.
+		Weight::from_parts(24_088_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -224,8 +263,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `66100`
 		//  Estimated: `69046`
-		// Minimum execution time: 89_000_000 picoseconds.
-		Weight::from_parts(119_000_000, 0)
+		// Minimum execution time: 127_137_000 picoseconds.
+		Weight::from_parts(131_638_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -238,8 +277,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `184`
 		//  Estimated: `69046`
-		// Minimum execution time: 43_000_000 picoseconds.
-		Weight::from_parts(52_000_000, 0)
+		// Minimum execution time: 103_212_000 picoseconds.
+		Weight::from_parts(105_488_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d73ce8c440fc8d2e8b52981bb9253c5e62b39886
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs
@@ -0,0 +1,273 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod pallet_xcm_benchmarks_fungible;
+mod pallet_xcm_benchmarks_generic;
+
+use crate::{xcm_config::MaxAssetsIntoHolding, Runtime};
+use alloc::vec::Vec;
+use frame_support::weights::Weight;
+use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
+use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
+use xcm::{
+	latest::{prelude::*, AssetTransferFilter},
+	DoubleEncoded,
+};
+
+trait WeighAssets {
+	fn weigh_assets(&self, weight: Weight) -> Weight;
+}
+
+// Collectives only knows about WND.
+const MAX_ASSETS: u64 = 1;
+
+impl WeighAssets for AssetFilter {
+	fn weigh_assets(&self, weight: Weight) -> Weight {
+		match self {
+			Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64),
+			Self::Wild(asset) => match asset {
+				All => weight.saturating_mul(MAX_ASSETS),
+				AllOf { fun, .. } => match fun {
+					WildFungibility::Fungible => weight,
+					// Magic number 2 has to do with the fact that we could have up to 2 times
+					// MaxAssetsIntoHolding in the worst-case scenario.
+					WildFungibility::NonFungible =>
+						weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64),
+				},
+				AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
+				AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
+			},
+		}
+	}
+}
+
+impl WeighAssets for Assets {
+	fn weigh_assets(&self, weight: Weight) -> Weight {
+		weight.saturating_mul(self.inner().iter().count() as u64)
+	}
+}
+
+pub struct CollectivesWestendXcmWeight<Call>(core::marker::PhantomData<Call>);
+impl<Call> XcmWeightInfo<Call> for CollectivesWestendXcmWeight<Call> {
+	fn withdraw_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
+	}
+	fn reserve_asset_deposited(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	}
+	fn receive_teleported_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
+	}
+	fn query_response(
+		_query_id: &u64,
+		_response: &Response,
+		_max_weight: &Weight,
+		_querier: &Option<Location>,
+	) -> Weight {
+		XcmGeneric::<Runtime>::query_response()
+	}
+	fn transfer_asset(assets: &Assets, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_asset())
+	}
+	fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
+	}
+	fn transact(
+		_origin_type: &OriginKind,
+		_fallback_max_weight: &Option<Weight>,
+		_call: &DoubleEncoded<Call>,
+	) -> Weight {
+		XcmGeneric::<Runtime>::transact()
+	}
+	fn hrmp_new_channel_open_request(
+		_sender: &u32,
+		_max_message_size: &u32,
+		_max_capacity: &u32,
+	) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn hrmp_channel_accepted(_recipient: &u32) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn clear_origin() -> Weight {
+		XcmGeneric::<Runtime>::clear_origin()
+	}
+	fn descend_origin(_who: &InteriorLocation) -> Weight {
+		XcmGeneric::<Runtime>::descend_origin()
+	}
+	fn report_error(_query_response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::report_error()
+	}
+
+	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
+	}
+	fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset())
+	}
+	fn exchange_asset(_give: &AssetFilter, _receive: &Assets, _maximal: &bool) -> Weight {
+		Weight::MAX
+	}
+	fn initiate_reserve_withdraw(
+		assets: &AssetFilter,
+		_reserve: &Location,
+		_xcm: &Xcm<()>,
+	) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw())
+	}
+	fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
+	}
+	fn initiate_transfer(
+		_dest: &Location,
+		remote_fees: &Option<AssetTransferFilter>,
+		_preserve_origin: &bool,
+		assets: &Vec<AssetTransferFilter>,
+		_xcm: &Xcm<()>,
+	) -> Weight {
+		let mut weight = if let Some(remote_fees) = remote_fees {
+			let fees = remote_fees.inner();
+			fees.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer())
+		} else {
+			Weight::zero()
+		};
+		for asset_filter in assets {
+			let assets = asset_filter.inner();
+			let extra = assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer());
+			weight = weight.saturating_add(extra);
+		}
+		weight
+	}
+	fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight {
+		XcmGeneric::<Runtime>::report_holding()
+	}
+	fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight {
+		XcmGeneric::<Runtime>::buy_execution()
+	}
+	fn pay_fees(_asset: &Asset) -> Weight {
+		XcmGeneric::<Runtime>::pay_fees()
+	}
+	fn refund_surplus() -> Weight {
+		XcmGeneric::<Runtime>::refund_surplus()
+	}
+	fn set_error_handler(_xcm: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::set_error_handler()
+	}
+	fn set_appendix(_xcm: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::set_appendix()
+	}
+	fn clear_error() -> Weight {
+		XcmGeneric::<Runtime>::clear_error()
+	}
+	fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+		let mut weight = Weight::zero();
+		for hint in hints {
+			match hint {
+				AssetClaimer { .. } => {
+					weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+				},
+			}
+		}
+		weight
+	}
+	fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight {
+		XcmGeneric::<Runtime>::claim_asset()
+	}
+	fn trap(_code: &u64) -> Weight {
+		XcmGeneric::<Runtime>::trap()
+	}
+	fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight {
+		XcmGeneric::<Runtime>::subscribe_version()
+	}
+	fn unsubscribe_version() -> Weight {
+		XcmGeneric::<Runtime>::unsubscribe_version()
+	}
+	fn burn_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmGeneric::<Runtime>::burn_asset())
+	}
+	fn expect_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmGeneric::<Runtime>::expect_asset())
+	}
+	fn expect_origin(_origin: &Option<Location>) -> Weight {
+		XcmGeneric::<Runtime>::expect_origin()
+	}
+	fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight {
+		XcmGeneric::<Runtime>::expect_error()
+	}
+	fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight {
+		XcmGeneric::<Runtime>::expect_transact_status()
+	}
+	fn query_pallet(_module_name: &Vec<u8>, _response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::query_pallet()
+	}
+	fn expect_pallet(
+		_index: &u32,
+		_name: &Vec<u8>,
+		_module_name: &Vec<u8>,
+		_crate_major: &u32,
+		_min_crate_minor: &u32,
+	) -> Weight {
+		XcmGeneric::<Runtime>::expect_pallet()
+	}
+	fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::report_transact_status()
+	}
+	fn clear_transact_status() -> Weight {
+		XcmGeneric::<Runtime>::clear_transact_status()
+	}
+	fn universal_origin(_: &Junction) -> Weight {
+		Weight::MAX
+	}
+	fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight {
+		Weight::MAX
+	}
+	fn lock_asset(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn unlock_asset(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn note_unlockable(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn request_unlock(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn set_fees_mode(_: &bool) -> Weight {
+		XcmGeneric::<Runtime>::set_fees_mode()
+	}
+	fn set_topic(_topic: &[u8; 32]) -> Weight {
+		XcmGeneric::<Runtime>::set_topic()
+	}
+	fn clear_topic() -> Weight {
+		XcmGeneric::<Runtime>::clear_topic()
+	}
+	fn alias_origin(_: &Location) -> Weight {
+		XcmGeneric::<Runtime>::alias_origin()
+	}
+	fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
+		XcmGeneric::<Runtime>::unpaid_execution()
+	}
+	fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::execute_with_origin()
+	}
+}
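
A minimal, self-contained sketch of the asset-count scaling used by the `WeighAssets` helpers in the file above (illustrative stand-in types, not the runtime code): a benchmarked single-asset weight is multiplied by the number of assets an instruction may touch, capped at `MAX_ASSETS` for wildcard filters because this chain only deals in WND.

	// Sketch only: stand-in Weight with the two components the runtime tracks.
	#[derive(Clone, Copy, Debug, PartialEq)]
	struct Weight { ref_time: u64, proof_size: u64 }

	impl Weight {
		const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
			Self { ref_time, proof_size }
		}
		fn saturating_mul(self, n: u64) -> Self {
			Self {
				ref_time: self.ref_time.saturating_mul(n),
				proof_size: self.proof_size.saturating_mul(n),
			}
		}
	}

	// Wildcard filters are capped at one asset on this chain.
	const MAX_ASSETS: u64 = 1;

	// Mirrors the `AllCounted`/`AllOfCounted` arms: charge the per-asset weight
	// once per asset the filter may match, never more than the cap.
	fn weigh_counted(per_asset: Weight, requested_count: u64) -> Weight {
		per_asset.saturating_mul(MAX_ASSETS.min(requested_count))
	}

	fn main() {
		// Per-asset cost taken from the autogenerated `withdraw_asset` figure below.
		let withdraw_one = Weight::from_parts(30_813_000, 3593);
		// A filter asking for five assets is still charged for one on this chain.
		assert_eq!(weigh_counted(withdraw_one, 5), withdraw_one);
	}
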
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
new file mode 100644
index 0000000000000000000000000000000000000000..00826cbb8d79ea3cbf9cc9b8dd0622e6b781df5e
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -0,0 +1,211 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn withdraw_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 30_401_000 picoseconds.
+		Weight::from_parts(30_813_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn transfer_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `153`
+		//  Estimated: `6196`
+		// Minimum execution time: 43_150_000 picoseconds.
+		Weight::from_parts(43_919_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn transfer_reserve_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `223`
+		//  Estimated: `6196`
+		// Minimum execution time: 67_808_000 picoseconds.
+		Weight::from_parts(69_114_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(8))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	// Storage: `Benchmark::Override` (r:0 w:0)
+	// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn reserve_asset_deposited() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_reserve_withdraw() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_312_000 picoseconds.
+		Weight::from_parts(30_347_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn receive_teleported_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_283_000 picoseconds.
+		Weight::from_parts(2_448_000, 0)
+	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn deposit_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `52`
+		//  Estimated: `3593`
+		// Minimum execution time: 23_556_000 picoseconds.
+		Weight::from_parts(24_419_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn deposit_reserve_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `122`
+		//  Estimated: `3593`
+		// Minimum execution time: 58_342_000 picoseconds.
+		Weight::from_parts(59_598_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_teleport() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 28_285_000 picoseconds.
+		Weight::from_parts(29_016_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_transfer() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `122`
+		//  Estimated: `3593`
+		// Minimum execution time: 65_211_000 picoseconds.
+		Weight::from_parts(67_200_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ae94edc3d7315c0738f1b202cffb75ca1f968919
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -0,0 +1,355 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::generic`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::generic
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::generic`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_holding() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_015_000 picoseconds.
+		Weight::from_parts(30_359_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn buy_execution() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 572_000 picoseconds.
+		Weight::from_parts(637_000, 0)
+	}
+	pub fn pay_fees() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 1_550_000 picoseconds.
+		Weight::from_parts(1_604_000, 0)
+	}
+	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
+	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn query_response() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `32`
+		//  Estimated: `3497`
+		// Minimum execution time: 7_354_000 picoseconds.
+		Weight::from_parts(7_808_000, 3497)
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	pub fn transact() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 6_716_000 picoseconds.
+		Weight::from_parts(7_067_000, 0)
+	}
+	pub fn refund_surplus() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 1_280_000 picoseconds.
+		Weight::from_parts(1_355_000, 0)
+	}
+	pub fn set_error_handler() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 587_000 picoseconds.
+		Weight::from_parts(645_000, 0)
+	}
+	pub fn set_appendix() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 629_000 picoseconds.
+		Weight::from_parts(662_000, 0)
+	}
+	pub fn clear_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 590_000 picoseconds.
+		Weight::from_parts(639_000, 0)
+	}
+	pub fn descend_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 651_000 picoseconds.
+		Weight::from_parts(688_000, 0)
+	}
+	pub fn clear_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 601_000 picoseconds.
+		Weight::from_parts(630_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 25_650_000 picoseconds.
+		Weight::from_parts(26_440_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1)
+	// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn claim_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `90`
+		//  Estimated: `3555`
+		// Minimum execution time: 10_492_000 picoseconds.
+		Weight::from_parts(10_875_000, 3555)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn trap() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 597_000 picoseconds.
+		Weight::from_parts(647_000, 0)
+	}
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn subscribe_version() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `38`
+		//  Estimated: `3503`
+		// Minimum execution time: 23_732_000 picoseconds.
+		Weight::from_parts(24_290_000, 3503)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn unsubscribe_version() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_446_000 picoseconds.
+		Weight::from_parts(2_613_000, 0)
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn burn_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 960_000 picoseconds.
+		Weight::from_parts(1_045_000, 0)
+	}
+	pub fn expect_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 703_000 picoseconds.
+		Weight::from_parts(739_000, 0)
+	}
+	pub fn expect_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 616_000 picoseconds.
+		Weight::from_parts(651_000, 0)
+	}
+	pub fn expect_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 621_000 picoseconds.
+		Weight::from_parts(660_000, 0)
+	}
+	pub fn expect_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 794_000 picoseconds.
+		Weight::from_parts(831_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn query_pallet() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_527_000 picoseconds.
+		Weight::from_parts(30_614_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn expect_pallet() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_189_000 picoseconds.
+		Weight::from_parts(3_296_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 25_965_000 picoseconds.
+		Weight::from_parts(26_468_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn clear_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 618_000 picoseconds.
+		Weight::from_parts(659_000, 0)
+	}
+	pub fn set_topic() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 593_000 picoseconds.
+		Weight::from_parts(618_000, 0)
+	}
+	pub fn clear_topic() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 603_000 picoseconds.
+		Weight::from_parts(634_000, 0)
+	}
+	pub fn alias_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_000_000 picoseconds.
+		Weight::from_parts(2_000_000, 0)
+	}
+	pub fn set_fees_mode() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 568_000 picoseconds.
+		Weight::from_parts(629_000, 0)
+	}
+	pub fn unpaid_execution() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 598_000 picoseconds.
+		Weight::from_parts(655_000, 0)
+	}
+	pub fn asset_claimer() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 707_000 picoseconds.
+		Weight::from_parts(749_000, 0)
+	}
+	pub fn execute_with_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 713_000 picoseconds.
+		Weight::from_parts(776_000, 0)
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
index 9eb9b85a391897fb9ef0bd9856cd622b428c8fc9..c5ab21fe8f904372fc4d12b20961006fd01df612 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
@@ -21,7 +21,6 @@ use super::{
 use frame_support::{
 	parameter_types,
 	traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing},
-	weights::Weight,
 };
 use frame_system::EnsureRoot;
 use pallet_collator_selection::StakingPotAccountId;
@@ -39,12 +38,12 @@ use xcm_builder::{
 	AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
 	AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
 	DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily,
-	EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter,
-	HashedDescription, IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser,
-	ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative,
-	SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
-	SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
-	WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents,
+	EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete,
+	LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset,
+	RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
+	SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
+	TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
+	XcmFeeManagerFromComponents,
 };
 use xcm_executor::XcmExecutor;
 
@@ -125,11 +124,6 @@ pub type XcmOriginToTransactDispatchOrigin = (
 );
 
 parameter_types! {
-	/// The amount of weight an XCM operation takes. This is a safe overestimate.
-	pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024);
-	/// A temporary weight value for each XCM instruction.
-	/// NOTE: This should be removed after we account for PoV weights.
-	pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0);
 	pub const MaxInstructions: u32 = 100;
 	pub const MaxAssetsIntoHolding: u32 = 64;
 	// Fellows pluralistic body.
@@ -208,7 +202,11 @@ impl xcm_executor::Config for XcmConfig {
 	type IsTeleporter = TrustedTeleporters;
 	type UniversalLocation = UniversalLocation;
 	type Barrier = Barrier;
-	type Weigher = FixedWeightBounds<TempFixedXcmWeight, RuntimeCall, MaxInstructions>;
+	type Weigher = WeightInfoBounds<
+		crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+		RuntimeCall,
+		MaxInstructions,
+	>;
 	type Trader = UsingComponents<
 		WeightToFee,
 		WndLocation,
@@ -275,7 +273,11 @@ impl pallet_xcm::Config for Runtime {
 	type XcmExecutor = XcmExecutor<XcmConfig>;
 	type XcmTeleportFilter = Everything;
 	type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
-	type Weigher = FixedWeightBounds<BaseXcmWeight, RuntimeCall, MaxInstructions>;
+	type Weigher = WeightInfoBounds<
+		crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+		RuntimeCall,
+		MaxInstructions,
+	>;
 	type UniversalLocation = UniversalLocation;
 	type RuntimeOrigin = RuntimeOrigin;
 	type RuntimeCall = RuntimeCall;
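
For context on the `Weigher` change above (a rough model, not the `xcm-builder` implementation): `FixedWeightBounds` charged the same constant for every XCM instruction, while `WeightInfoBounds` sums a benchmarked weight per instruction, still bounded by `MaxInstructions`.

	// Toy comparison of the two weighing strategies, treating weights as plain
	// u64 ref_time values and assuming MaxInstructions = 100 as configured above.
	const MAX_INSTRUCTIONS: usize = 100;

	// FixedWeightBounds-style: every instruction costs the same constant.
	fn weigh_fixed(num_instructions: usize, fixed_per_instruction: u64) -> Option<u64> {
		if num_instructions > MAX_INSTRUCTIONS {
			return None; // message too long to weigh
		}
		fixed_per_instruction.checked_mul(num_instructions as u64)
	}

	// WeightInfoBounds-style: each instruction contributes its own benchmarked cost.
	fn weigh_benchmarked(per_instruction: &[u64]) -> Option<u64> {
		if per_instruction.len() > MAX_INSTRUCTIONS {
			return None;
		}
		per_instruction.iter().try_fold(0u64, |acc, w| acc.checked_add(*w))
	}

	fn main() {
		// The removed TempFixedXcmWeight charged 1_000_000_000 ref_time per instruction.
		assert_eq!(weigh_fixed(3, 1_000_000_000), Some(3_000_000_000));
		// With benchmarked weights, e.g. clear_origin (~601_000 ps) and
		// transact (~6_716_000 ps) each pay their measured cost instead.
		assert_eq!(weigh_benchmarked(&[601_000, 6_716_000]), Some(7_317_000));
	}
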
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index cb0655d70cf29e4679b5ade41794e0c5089f8568..067c4df3b53654f28fdc40e0ac58a480d7fdfef3 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -70,11 +70,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 
@@ -90,11 +90,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -148,6 +148,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -179,6 +180,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 594c9b26f57e8eadca03f13c7536170edbe7b51a..3348a635df01a63c18369e9258e8f1b3a316bb18 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -88,17 +88,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -201,6 +203,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -402,6 +408,7 @@ construct_runtime!(
 		RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip = 2,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -448,6 +455,7 @@ mod benches {
 		[cumulus_pallet_parachain_system, ParachainSystem]
 		[pallet_contracts, Contracts]
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -849,18 +857,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index 2b5fab329293d58f445ba2e30bbacfa5b8305dff..668b4cc6c7b9bd6a358254f1c86b639dbc487cf5 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -71,11 +71,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -92,11 +92,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -154,6 +154,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -186,6 +187,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index e8f6e6659e13587ad8fe2b6788b3c55a92e06007..e9171c79afaec8b871b9e88d52e283d04b1437fb 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -99,18 +99,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -221,6 +223,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -622,6 +628,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -672,6 +679,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1140,18 +1148,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6298fd6e7f69815e659af5a404b1bc2bf0f41d5b
--- /dev/null
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=coretime-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_638_000 picoseconds.
+		Weight::from_parts(6_806_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
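
A rough sketch of the idea behind the reclaim operation benchmarked above (an assumption about the mechanism, not the pallet's code): after dispatch, the proof size actually recorded is compared with what was charged up front, and the unused portion is credited back to the block.

	// Sketch only: the reclaimable amount is the charged proof size minus what
	// the proof recorder actually measured, never going negative.
	fn reclaimable_proof_size(charged: u64, measured: u64) -> u64 {
		charged.saturating_sub(measured)
	}

	fn main() {
		// An extrinsic charged 3_593 bytes of proof size that only touched
		// 1_200 bytes frees 2_393 bytes of the block's proof budget.
		assert_eq!(reclaimable_proof_size(3_593, 1_200), 2_393);
	}
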
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
index a4d09696a1a116ea5add5d9a1d41b86169bfe272..04b695b5769302f51ce764bfff219900bff107eb 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
index 24c4f50e6ab8b8c46ae1bfa177c8afada04a795e..7fee4a728b9ef750224efeac78408ca58827d643 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
@@ -19,6 +19,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 03df782bc266afe39bcd1a430debb531a5adff3e..915926ff9894ed3164dd37aeee828aa7229bb863 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -70,11 +70,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 
 pallet-collator-selection = { workspace = true }
@@ -92,11 +92,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -152,6 +152,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -183,6 +184,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index ce965f0ad1ba90f0b12c5f0defc64e7788cec686..975856b3b6ff8eeebdc65427d693181eb9919eee 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -99,18 +99,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -221,6 +223,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -617,6 +623,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -664,6 +671,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1135,18 +1143,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..55d52f4b04c8788f1bc05472516e1de4e6d65c92
--- /dev/null
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=coretime-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_658_000 picoseconds.
+		Weight::from_parts(6_905_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
index d928b73613a3f110dc373ea91070377b644c4bbb..9527e0c5549a788b5d5995aa65a0ed30c909f426 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
index 24c4f50e6ab8b8c46ae1bfa177c8afada04a795e..7fee4a728b9ef750224efeac78408ca58827d643 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
@@ -19,6 +19,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
index 763f8abea34a7ae965af26189248bdac04de7d7f..75f45297fe2cdade064fe3478ff1979aecb11ee2 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
@@ -300,6 +300,7 @@ pub type TxExtension = (
 	frame_system::CheckEra<Runtime>,
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
index 4fbbb8d6f781d97aeedfa7d84779190464ff70a2..db9a14e2cf242ab40c4fb68aaa1a90fbe4605501 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,30 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/glutton-westend-runtime/glutton_westend_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/
-// --chain=glutton-westend-dev-1300
+// --no-median-slopes
+// --genesis-builder-policy=none
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,10 +56,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
+		//  Measured:  `0`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_908_000 picoseconds.
-		Weight::from_parts(4_007_000, 0)
+		// Minimum execution time: 2_572_000 picoseconds.
+		Weight::from_parts(2_680_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -65,10 +67,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `0`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_510_000 picoseconds.
-		Weight::from_parts(6_332_000, 0)
+		// Minimum execution time: 5_818_000 picoseconds.
+		Weight::from_parts(6_024_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -76,10 +78,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `14`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_510_000 picoseconds.
-		Weight::from_parts(6_332_000, 0)
+		// Minimum execution time: 7_364_000 picoseconds.
+		Weight::from_parts(7_676_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +89,52 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 651_000 picoseconds.
-		Weight::from_parts(851_000, 0)
+		// Minimum execution time: 657_000 picoseconds.
+		Weight::from_parts(686_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_387_000 picoseconds.
-		Weight::from_parts(3_646_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Estimated: `3529`
+		// Minimum execution time: 6_931_000 picoseconds.
+		Weight::from_parts(7_096_000, 0)
+			.saturating_add(Weight::from_parts(0, 3529))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(651_000, 0)
+		// Minimum execution time: 518_000 picoseconds.
+		Weight::from_parts(539_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 451_000 picoseconds.
-		Weight::from_parts(662_000, 0)
+		// Minimum execution time: 530_000 picoseconds.
+		Weight::from_parts(550_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 3_537_000 picoseconds.
-		Weight::from_parts(4_208_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 5_691_000 picoseconds.
+		Weight::from_parts(5_955_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_249_000 picoseconds.
+		Weight::from_parts(3_372_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index de2898046c0d75b4ce0613c58e675747795c2302..6391f8c3eeb904586c9a0bc8449eb546218846da 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -68,11 +68,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -89,11 +89,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"enumflags2/std",
 	"frame-benchmarking?/std",
@@ -150,6 +150,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -182,6 +183,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index b8db687da62525c7a190ad6c143522ff4be60833..ffdd86c500e5b4da5eb56a3fb7f16c16083745cb 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -92,17 +92,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -196,6 +198,10 @@ impl frame_system::Config for Runtime {
 	type MultiBlockMigrator = MultiBlockMigrations;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -567,6 +573,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -626,6 +633,7 @@ mod benches {
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1055,18 +1063,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..439855f857192a7c729f5aa49e2b90a1aabd7407
--- /dev/null
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=people-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_097_000 picoseconds.
+		Weight::from_parts(7_419_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
index fb2b69e23e82b690e225517377aafd54fc72a240..3f12b25540ea669dff95ba4e8c9c87429a372af0 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
index fab3c629ab3f31363233cf289c2abd1b96a59997..81906a11fe1c9ea7a9fde28cea1734bd2dd97252 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
@@ -17,6 +17,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
index 65bc8264934f65f9037ff041640d431f99e67ae4..fae0fd2e33324e500d75983ce09db276450fa79c 100644
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
@@ -68,11 +68,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -89,11 +89,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"enumflags2/std",
 	"frame-benchmarking?/std",
@@ -150,6 +150,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -182,6 +183,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index 620ec41c071cd7061bf946befb15354f3ad09d4b..ee6b0db55b91ad02155225bf8f72f6c00123886b 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -92,17 +92,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The transactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -195,6 +197,10 @@ impl frame_system::Config for Runtime {
 	type MultiBlockMigrator = MultiBlockMigrations;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -566,6 +572,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -624,6 +631,7 @@ mod benches {
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1055,18 +1063,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fd3018ec9740111167a2e66189820aff72765ddd
--- /dev/null
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=people-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_006_000 picoseconds.
+		Weight::from_parts(7_269_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
index 0a4b9e8e26812bfe82aaaccd3dc19221d7327e37..422b8566ad08a58a6f26f7c87e975d235305b3b2 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
index fab3c629ab3f31363233cf289c2abd1b96a59997..81906a11fe1c9ea7a9fde28cea1734bd2dd97252 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
@@ -17,6 +17,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index b51670c792d6badee368c5b176d086cc72bacc04..38ddf3bc1991b4e9541d111a6ec2c1fcf77dca88 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -140,6 +140,7 @@ pub type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_asset_tx_payment::ChargeAssetTxPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
@@ -1132,18 +1133,8 @@ impl_runtime_apis! {
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index e8761445f161e1f82682b40e51e35b00d37d6ca3..826a2e9764fc1f788e9b6e62d977565d95630e00 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -51,12 +51,12 @@ xcm-executor = { workspace = true }
 # Cumulus
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-ping = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 parachain-info = { workspace = true }
@@ -72,12 +72,12 @@ std = [
 	"codec/std",
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-ping/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -117,6 +117,7 @@ std = [
 ]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index 42556e0b493cd6fb413742a67f7ce08012775213..89cd17d5450ac2af19f6c550e2a4b62abd298dbf 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -226,6 +226,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -617,6 +621,7 @@ construct_runtime! {
 		Timestamp: pallet_timestamp,
 		Sudo: pallet_sudo,
 		TransactionPayment: pallet_transaction_payment,
+		WeightReclaim: cumulus_pallet_weight_reclaim,
 
 		ParachainSystem: cumulus_pallet_parachain_system = 20,
 		ParachainInfo: parachain_info = 21,
@@ -657,17 +662,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
+
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
index 5cbe662e2700924c55814680cda4d0428ad0c450..62ff60811904525c1e6af944776bfaac77b926fa 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
@@ -100,15 +100,30 @@ pub fn get_proof_size() -> Option<u64> {
 	(proof_size != PROOF_RECORDING_DISABLED).then_some(proof_size)
 }
 
-/// Storage weight reclaim mechanism.
-///
-/// This extension checks the size of the node-side storage proof
-/// before and after executing a given extrinsic. The difference between
-/// benchmarked and spent weight can be reclaimed.
-#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
-#[scale_info(skip_type_params(T))]
-pub struct StorageWeightReclaim<T: Config + Send + Sync>(PhantomData<T>);
+// Encapsulate into a mod so that macro-generated code doesn't trigger a warning about deprecated
+// usage.
+#[allow(deprecated)]
+mod allow_deprecated {
+	use super::*;
+
+	/// Storage weight reclaim mechanism.
+	///
+	/// This extension checks the size of the node-side storage proof
+	/// before and after executing a given extrinsic. The difference between
+	/// benchmarked and spent weight can be reclaimed.
+	#[deprecated(note = "This extension doesn't provide accurate reclaim for a storage-intensive \
+		transaction extension pipeline; it ignores the validation and preparation of extensions prior \
+		to itself, ignores the post-dispatch logic for extensions subsequent to itself, and also \
+		doesn't provide weight information. \
+		Use `StorageWeightReclaim` in the `cumulus-pallet-weight-reclaim` crate instead.")]
+	#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
+	#[scale_info(skip_type_params(T))]
+	pub struct StorageWeightReclaim<T: Config + Send + Sync>(pub(super) PhantomData<T>);
+}
+#[allow(deprecated)]
+pub use allow_deprecated::StorageWeightReclaim;
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> StorageWeightReclaim<T> {
 	/// Create a new `StorageWeightReclaim` instance.
 	pub fn new() -> Self {
@@ -116,6 +131,7 @@ impl<T: Config + Send + Sync> StorageWeightReclaim<T> {
 	}
 }
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> core::fmt::Debug for StorageWeightReclaim<T> {
 	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
 		let _ = write!(f, "StorageWeightReclaim");
@@ -123,6 +139,7 @@ impl<T: Config + Send + Sync> core::fmt::Debug for StorageWeightReclaim<T> {
 	}
 }
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> TransactionExtension<T::RuntimeCall> for StorageWeightReclaim<T>
 where
 	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
index ab83762cc0db1fb0947a6414089a0e3be463adcc..379b39afee0c1b33fdcf85aaea099de3190e8387 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/tests.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
@@ -74,6 +74,7 @@ fn get_storage_weight() -> PerDispatchClass<Weight> {
 }
 
 #[test]
+#[allow(deprecated)]
 fn basic_refund() {
 	// The real cost will be 100 bytes of storage size
 	let mut test_ext = setup_test_externalities(&[0, 100]);
@@ -109,6 +110,7 @@ fn basic_refund() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn underestimating_refund() {
 	// We fixed a bug where `pre dispatch info weight > consumed weight > post info weight`
 	// resulted in error.
@@ -149,6 +151,7 @@ fn underestimating_refund() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn sets_to_node_storage_proof_if_higher() {
 	// The storage proof reported by the proof recorder is higher than what is stored on
 	// the runtime side.
@@ -240,6 +243,7 @@ fn sets_to_node_storage_proof_if_higher() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn does_nothing_without_extension() {
 	let mut test_ext = new_test_ext();
 
@@ -274,6 +278,7 @@ fn does_nothing_without_extension() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn negative_refund_is_added_to_weight() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -310,6 +315,7 @@ fn negative_refund_is_added_to_weight() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_zero_proof_size() {
 	let mut test_ext = setup_test_externalities(&[0, 0]);
 
@@ -340,6 +346,7 @@ fn test_zero_proof_size() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_larger_pre_dispatch_proof_size() {
 	let mut test_ext = setup_test_externalities(&[300, 100]);
 
@@ -374,6 +381,7 @@ fn test_larger_pre_dispatch_proof_size() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -415,6 +423,7 @@ fn test_incorporates_check_weight_unspent_weight() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_on_negative() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -456,6 +465,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_nothing_relcaimed() {
 	let mut test_ext = setup_test_externalities(&[0, 100]);
 
@@ -505,6 +515,7 @@ fn test_nothing_relcaimed() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_reverse_order() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -548,6 +559,7 @@ fn test_incorporates_check_weight_unspent_weight_reverse_order() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -616,6 +628,7 @@ fn storage_size_disabled_reported_correctly() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_reclaim_helper() {
 	let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]);
 
@@ -635,6 +648,7 @@ fn test_reclaim_helper() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_reclaim_helper_does_not_reclaim_negative() {
 	// Benchmarked weight does not change at all
 	let mut test_ext = setup_test_externalities(&[1000, 1300]);
@@ -669,6 +683,7 @@ fn get_benched_weight() -> Weight {
 /// Just here for doc purposes
 fn do_work() {}
 
+#[allow(deprecated)]
 #[docify::export_content(simple_reclaimer_example)]
 fn reclaim_with_weight_meter() {
 	let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10));
diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml
index 2c72ca98f35a07a8c148559530ad2e7ee327a958..f64ee832ace3be5343a53e7b68d3634e79021bef 100644
--- a/cumulus/test/client/Cargo.toml
+++ b/cumulus/test/client/Cargo.toml
@@ -39,16 +39,17 @@ polkadot-parachain-primitives = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
 
 # Cumulus
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-core = { workspace = true, default-features = true }
 cumulus-primitives-parachain-inherent = { workspace = true, default-features = true }
 cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 cumulus-test-relay-sproof-builder = { workspace = true, default-features = true }
 cumulus-test-runtime = { workspace = true }
 cumulus-test-service = { workspace = true }
 
 [features]
 runtime-benchmarks = [
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-test-service/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs
index 26cf02b3dea94734f2bdd58f7a16c9ef3640a3c1..7861a42372a6519f57ccf84383b8383e4cb40b85 100644
--- a/cumulus/test/client/src/lib.rs
+++ b/cumulus/test/client/src/lib.rs
@@ -143,7 +143,6 @@ pub fn generate_extrinsic_with_pair(
 		frame_system::CheckNonce::<Runtime>::from(nonce),
 		frame_system::CheckWeight::<Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<Runtime>::new(),
 	)
 		.into();
 
@@ -152,7 +151,7 @@ pub fn generate_extrinsic_with_pair(
 	let raw_payload = SignedPayload::from_raw(
 		function.clone(),
 		tx_ext.clone(),
-		((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()),
+		((), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
 	);
 	let signature = raw_payload.using_encoded(|e| origin.sign(e));
 
diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml
index 150838e5e96e774961a004ba699f6db10fc62f49..4cc4f483c0287903f12c76a88d9c0bacdbc5ec0e 100644
--- a/cumulus/test/runtime/Cargo.toml
+++ b/cumulus/test/runtime/Cargo.toml
@@ -44,9 +44,9 @@ sp-version = { workspace = true }
 # Cumulus
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
 
@@ -59,9 +59,9 @@ std = [
 	"codec/std",
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"frame-executive/std",
 	"frame-support/std",
 	"frame-system-rpc-runtime-api/std",
diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs
index 4abc10276af1852f8fcd272ae3ea3486355dab93..01ce3427c1f19b21663bd7b940c1fa95e212c48b 100644
--- a/cumulus/test/runtime/src/lib.rs
+++ b/cumulus/test/runtime/src/lib.rs
@@ -232,6 +232,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 parameter_types! {
 	pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
 	pub const PotId: PalletId = PalletId(*b"PotStake");
@@ -347,6 +351,7 @@ construct_runtime! {
 		Glutton: pallet_glutton,
 		Aura: pallet_aura,
 		AuraExt: cumulus_pallet_aura_ext,
+		WeightReclaim: cumulus_pallet_weight_reclaim,
 	}
 }
 
@@ -377,16 +382,18 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index b3d92444c7d1ff90e8ac02423c9dc2b7509cbf72..794007532621e196066ada89fdae09f9e4f09247 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -81,8 +81,8 @@ cumulus-client-parachain-inherent = { workspace = true, default-features = true
 cumulus-client-pov-recovery = { workspace = true, default-features = true }
 cumulus-client-service = { workspace = true, default-features = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-core = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true }
 cumulus-relay-chain-interface = { workspace = true, default-features = true }
 cumulus-relay-chain-minimal-node = { workspace = true, default-features = true }
@@ -107,6 +107,7 @@ substrate-test-utils = { workspace = true }
 [features]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-test-client/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index 2c13d20333a73f007ff01ea1a037cbdc2f6082ad..f3f04cbb63835fb2dc2926b1e3b50a320e39794d 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -976,13 +976,12 @@ pub fn construct_extrinsic(
 		frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(tip),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 	let raw_payload = runtime::SignedPayload::from_raw(
 		function.clone(),
 		tx_ext.clone(),
-		((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()),
+		((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
 	);
 	let signature = raw_payload.using_encoded(|e| caller.sign(e));
 	runtime::UncheckedExtrinsic::new_signed(
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
index bea36741135969fb216fbe559709d8b43a1c4b35..677cb5465b67f179f7a90f861f1f5e9dddded7b5 100644
--- a/docs/RELEASE.md
+++ b/docs/RELEASE.md
@@ -14,7 +14,11 @@ Merging to it is restricted to [Backports](#backports).
 
 We are releasing multiple different things from this repository in one release, but we don't want to use the same
 version for everything. Thus, in the following we explain the versioning story for the crates, node and Westend &
-Rococo. To easily refer to a release, it shall be named by its date in the form `stableYYMMDD`.
+Rococo.
+
+To easily refer to a release, it shall be named by its date in the form `stableYYMM`. Patches to stable releases are
+tagged in the form of `stableYYMM-PATCH`, with `PATCH` ranging from 1 to 99. For example, the fourth patch to
+`stable2409` would be `stable2409-4`.
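For illustration only (not part of this diff), a minimal Rust sketch of the tag scheme described above; the `release_tag` helper is hypothetical and does not exist in the repository:

```rust
/// Hypothetical helper, for illustration only: formats a release tag following
/// the `stableYYMM` scheme described above, with an optional `-PATCH` suffix.
fn release_tag(yy: u32, mm: u32, patch: Option<u32>) -> String {
	match patch {
		Some(p) => format!("stable{yy:02}{mm:02}-{p}"),
		None => format!("stable{yy:02}{mm:02}"),
	}
}

fn main() {
	assert_eq!(release_tag(24, 9, None), "stable2409");
	// The fourth patch to `stable2409`:
	assert_eq!(release_tag(24, 9, Some(4)), "stable2409-4");
}
```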
 
 ## Crate
 
diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index 4a1a3c1f06880086d334b1bb5b1688e7d76aa455..1f6252425e69538c5d724a89dbac414cb1174aab 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -1,73 +1,88 @@
 # PRDoc
 
-A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to
-record changes on a crate level. This information is then processed by the release team to apply the correct crate
-version bumps and to generate the CHANGELOG of the next release.
+A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use
+this approach to record changes on a crate level. This information is then processed by the release
+team to apply the correct crate version bumps and to generate the CHANGELOG of the next release.
 
 ## Requirements
 
-When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The
-`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping
-the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
+When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to
+contain a prdoc. The `R0` label should only be placed for No-OP changes like correcting a typo in a
+comment or CI stuff. If unsure, ping the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
 
-## PRDoc How-To
+## Auto Generation
 
-A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:
-
-1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`.
-1. Open a Pull Request and get the PR number.
-1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
-1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
-   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
-1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
-1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check it.
-
-> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:  
-> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
-
-Alternatively you can call the prdoc from PR via `/cmd prdoc` (see args with `/cmd prdoc --help`)
-in a comment to PR to trigger it from CI.
+You can create a PrDoc by using the `/cmd prdoc` command (see args with `/cmd prdoc --help`) in a
+comment on your PR.
 
 Options:
 
-- `pr`: The PR number to generate the PrDoc for.
-- `audience`: The audience of whom the changes may concern.
-- `bump`: A default bump level for all crates.
-  The PrDoc will likely need to be edited to reflect the actual changes after generation.
-- `force`: Whether to overwrite any existing PrDoc.
+- `audience`: The audience whom the changes may concern.
+  - `runtime_dev`: Anyone building a runtime themselves. For example parachain teams, or people
+    providing template runtimes. Also devs using pallets, FRAME etc directly. These are people who
+    care about the protocol (WASM), not the meta-protocol (client).
+  - `runtime_user`: Anyone using the runtime. Can be front-end devs reading the state, exchanges
+    listening for events, libraries that have hard-coded pallet indices etc. Anything that would
+    result in an observable change to the runtime behaviour must be marked with this.
+  - `node_dev`: Those who build around the client side code. Alternative client builders, SMOLDOT,
+    those who consume RPCs. These are people who are oblivious to the runtime changes. They only
+    care about the meta-protocol, not the protocol itself.
+  - `node_operator`: People who run the node. Think of validators, exchanges, indexer services, CI
+    actions. Anything that modifies how the binary behaves (its arguments, default arguments, error
+    messages, etc) must be marked with this.
+- `bump`: The default bump level for all crates. The PrDoc will likely need to be edited to reflect
+  the actual changes after generation. More details are in the section below.
+  - `none`: There is no observable change. Put differently: if someone were handed the old and the
+    new version of our software, it would be impossible to tell which version is which.
+  - `patch`: Fixes that will never cause compilation errors if someone updates to this version. No
+    functionality has been changed. Should be limited to fixing bugs or No-OP implementation
+    changes.
+  - `minor`: Additions that will never cause compilation errors if someone updates to this version.
+    No existing functionality has been changed. Should be limited to adding new features.
+  - `major`: Anything goes.
+- `force: true|false`: Whether to overwrite any existing PrDoc file.
 
-## Pick An Audience
-
-While describing a PR, the author needs to consider which audience(s) need to be addressed.
-The list of valid audiences is described and documented in the JSON schema as follow:
+### Example
 
-- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs.
-  These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol
-  itself.
+```bash
+/cmd prdoc --audience runtime_dev --bump patch
+```
 
-- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a
-  pallet. These are people who care about the protocol (WASM), not the meta-protocol (client).
+## Local Generation
 
-- `Node Operator`: Those who don't write any code and only run code.
+A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps
+to generate one:
 
-- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain.
+1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install
+   parity-prdoc`.
+1. Open a Pull Request and get the PR number.
+1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
+1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
+   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
+1. Edit your `.prdoc` file according to the audience descriptions in
+   [Auto Generation](#auto-generation) and the [SemVer](#record-semver-changes) section.
+1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check
+   it.
 
-If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and
-re-phrase the changes for each audience.
+> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct
+> file:  
+> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
 
 ## Record SemVer Changes
 
-All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells
-the release team how to bump the crate version prior to the next release. It is very important that this information is
-correct, otherwise it could break the code of downstream teams.
+All published crates that got modified need to have an entry in the `crates` section of your
+`PRDoc`. This entry tells the release team how to bump the crate version prior to the next release.
+It is very important that this information is correct, otherwise it could break the code of
+downstream teams.
 
 The bump can either be `major`, `minor`, `patch` or `none`. The three first options are defined by
-[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other
-applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs
-are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc
-about them.
+[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `none` should be
+picked if none of the others apply. The `none` option is equivalent to the `R0-silent` label,
+but on a crate level. Experimental and private APIs are exempt from bumping and can be broken at
+any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc about them.
 
-> **Note**: There is currently no CI in place to sanity check this information, but should be added soon.
+> **Note**: There is currently no CI in place to sanity check this information, but it should be
+> added soon.
 
 ### Example
 
@@ -81,12 +96,13 @@ crates:
     bump: minor
 ```
 
-It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using
-`frame-example` might break.
+It means that downstream code using `frame-example-pallet` is still guaranteed to work as before,
+while code using `frame-example` might break.
 
 ### Dependencies
 
-A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to
-bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change.  
-`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest
-compatible version.
+A crate that depends on another crate will automatically inherit its `major` bumps. This means that
+you do not need to bump a crate that had a SemVer breaking change only from re-exporting another
+crate with a breaking change.  
+`minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them
+to the latest compatible version.
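For reference, a complete `.prdoc` file combining the audience, description and crate bump described above might look like the following sketch; the PR number, crate name and wording are placeholders rather than content of this diff:

```yaml
# prdoc/pr_0000.prdoc -- illustrative placeholder, not an actual PR document.
title: Fix example pallet weight accounting

doc:
  - audience: Runtime Dev
    description: |
      Corrects the benchmarked weight of an example call so that unused weight
      can be reclaimed after dispatch.

crates:
  - name: frame-example-pallet
    bump: patch
```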
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml
index a856e94f42b51ea3417775e6a694e688134d70ab..4d83e2045ab01cfdb55f74cb61d841168d4143e2 100644
--- a/docs/sdk/Cargo.toml
+++ b/docs/sdk/Cargo.toml
@@ -68,8 +68,8 @@ substrate-wasm-builder = { workspace = true, default-features = true }
 cumulus-client-service = { workspace = true, default-features = true }
 cumulus-pallet-aura-ext = { workspace = true, default-features = true }
 cumulus-pallet-parachain-system = { workspace = true, default-features = true }
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 parachain-info = { workspace = true, default-features = true }
 
 # Omni Node
@@ -110,6 +110,7 @@ sp-offchain = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-runtime-interface = { workspace = true, default-features = true }
 sp-std = { workspace = true, default-features = true }
+sp-storage = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 sp-version = { workspace = true, default-features = true }
 sp-weights = { workspace = true, default-features = true }
diff --git a/docs/sdk/src/guides/enable_pov_reclaim.rs b/docs/sdk/src/guides/enable_pov_reclaim.rs
index cb6960b3df4ef1b5a507eb083baba808c9e99a0f..71abeacd18c8e024369159e3a10ee3f846df842b 100644
--- a/docs/sdk/src/guides/enable_pov_reclaim.rs
+++ b/docs/sdk/src/guides/enable_pov_reclaim.rs
@@ -62,8 +62,10 @@
 //!
 //! In your runtime, you will find a list of TransactionExtensions.
 //! To enable the reclaiming,
-//! add [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim)
-//! to that list. For maximum efficiency, make sure that `StorageWeightReclaim` is last in the list.
+//! set [`StorageWeightReclaim`](cumulus_pallet_weight_reclaim::StorageWeightReclaim)
+//! as a wrapper around that list.
+//! It is necessary that this extension wraps all the other transaction extensions in order to
+//! capture the whole PoV size of the transactions.
 //! The extension will check the size of the storage proof before and after an extrinsic execution.
 //! It reclaims the difference between the calculated size and the benchmarked size.
 #![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", template_signed_extra)]
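The wrapping described above boils down to three runtime changes, mirroring the cumulus test runtime changes earlier in this diff; a trimmed sketch (type names are the runtime's own, the extension list is abbreviated):

```rust
// 1. Configure the pallet (its `WeightInfo` can point at generated weights).
impl cumulus_pallet_weight_reclaim::Config for Runtime {
	type WeightInfo = ();
}

// 2. Add it to `construct_runtime!`:
//        WeightReclaim: cumulus_pallet_weight_reclaim,

// 3. Wrap the existing extensions so the whole pipeline's proof size is measured.
pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
	Runtime,
	(
		frame_system::CheckNonZeroSender<Runtime>,
		frame_system::CheckNonce<Runtime>,
		frame_system::CheckWeight<Runtime>,
		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
		// ...remaining extensions as before.
	),
>;
```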
diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
index 8acf19f7641379c3fdfda62ce34c5799179d0b4a..24595e445fdd6e48377d62a69aff6ab5bab8daad 100644
--- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs
+++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
@@ -57,6 +57,7 @@
 //! The following example showcases a minimal pallet.
 #![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)]
 //!
+//! ## Runtime
 //!
 //! A runtime is a collection of pallets that are amalgamated together. Each pallet typically has
 //! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime.
diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
index 68d7d31f67f3e855ec66a7d9d27b281bfe8a46c8..98192bfd2a905ff8de709f19b798bba89ad3aae8 100644
--- a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
+++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
@@ -96,7 +96,7 @@
 //! Two ways exist to run the benchmarks of a runtime.
 //!
 //! 1. The old school way: Most Polkadot-SDK based nodes (such as the ones integrated in
-//!    [`templates`]) have an a `benchmark` subcommand integrated into themselves.
+//!    [`templates`]) have a `benchmark` subcommand integrated into themselves.
 //! 2. The more [`crate::reference_docs::omni_node`] compatible way of running the benchmarks would
 //!    be using [`frame-omni-bencher`] CLI, which only relies on a runtime.
 //!
diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs
index e47eece784c4ce6b34ff9507873e0df2495dd912..7ad8a37241bf5ce58047572701a82d169116e8d9 100644
--- a/docs/sdk/src/reference_docs/mod.rs
+++ b/docs/sdk/src/reference_docs/mod.rs
@@ -111,3 +111,6 @@ pub mod custom_runtime_api_rpc;
 
 /// The [`polkadot-omni-node`](https://crates.io/crates/polkadot-omni-node) and its related binaries.
 pub mod omni_node;
+
+/// Learn about the state in Substrate.
+pub mod state;
diff --git a/docs/sdk/src/reference_docs/state.rs b/docs/sdk/src/reference_docs/state.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a8138caebf1eb2913b36bb9549bd4f1998908575
--- /dev/null
+++ b/docs/sdk/src/reference_docs/state.rs
@@ -0,0 +1,12 @@
+//! # State
+//!
+//! The state is abstracted as a key-value-like database. Every item that
+//! needs to be persisted by the [State Transition
+//! Function](crate::reference_docs::blockchain_state_machines) is written to the state.
+//!
+//! ## Special keys
+//!
+//! The key-value pairs in the state are represented as byte sequences. The node
+//! doesn't know how to interpret most of the key-value pairs. However, there exist some
+//! special keys and their values that are known to the node, the so-called
+//! [`well-known-keys`](sp_storage::well_known_keys).
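As a small illustration (not part of this diff) of how such a special key can be read from runtime code, assuming the `sp-io` and `sp-storage` crates already used in this workspace:

```rust
use sp_storage::well_known_keys;

/// Sketch only: `:code` is a well-known key holding the Wasm runtime blob, which
/// the node itself knows how to interpret. Returns the blob's size, if present.
fn runtime_code_len() -> Option<usize> {
	sp_io::storage::get(well_known_keys::CODE).map(|code| code.len())
}
```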
diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs
index 0f8198e8372d35975f69d811d545bf20b900407f..fe213458b25c0400a8aef014398fa6d04b7a2f2b 100644
--- a/docs/sdk/src/reference_docs/transaction_extensions.rs
+++ b/docs/sdk/src/reference_docs/transaction_extensions.rs
@@ -47,9 +47,11 @@
 //!   to include the so-called metadata hash. This is required by chains to support the generic
 //!   Ledger application and other similar offline wallets.
 //!
-//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A
-//!   transaction extension for parachains that reclaims unused storage weight after executing a
-//!   transaction.
+//! - [`WeightReclaim`](frame_system::WeightReclaim): A transaction extension for the relay chain
+//!   that reclaims unused weight after executing a transaction.
+//!
+//! - [`StorageWeightReclaim`](cumulus_pallet_weight_reclaim::StorageWeightReclaim): A transaction
+//!   extension for parachains that reclaims unused storage weight after executing a transaction.
 //!
 //! For more information about these extensions, follow the link to the type documentation.
 //!
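To make the difference between the two reclaim extensions concrete: for relay-chain style runtimes, this diff appends `frame_system::WeightReclaim` as the last element of the extension tuple (see the Westend and Rococo changes below), whereas parachains wrap the whole tuple. A trimmed sketch of the relay-chain form:

```rust
/// Sketch of a relay-chain extension tuple; `WeightReclaim` sits last so it runs
/// after the call and can give back any weight that was not actually consumed.
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckMortality<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
	frame_system::WeightReclaim<Runtime>,
);
```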
diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index d891af01c3ab85621c72c4c0e6beda52335387da..a5d42d9fd6e6520087dcad75b862917b60047a07 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -561,7 +561,7 @@ impl BlockEntry {
 		self.distributed_assignments.resize(new_len, false);
 		self.distributed_assignments |= bitfield;
 
-		// If the an operation did not change our current bitfield, we return true.
+		// If an operation did not change our current bitfield, we return true.
 		let distributed = total_one_bits == self.distributed_assignments.count_ones();
 
 		distributed
diff --git a/polkadot/node/core/pvf-checker/src/interest_view.rs b/polkadot/node/core/pvf-checker/src/interest_view.rs
index 05a6f12de5d8fbe2a0f1529af1dbbb742ecca8c9..617d0e0b5d88dd377ec9eeadaa8a53725daddf9d 100644
--- a/polkadot/node/core/pvf-checker/src/interest_view.rs
+++ b/polkadot/node/core/pvf-checker/src/interest_view.rs
@@ -58,7 +58,7 @@ impl PvfData {
 		Self { judgement: None, seen_in }
 	}
 
-	/// Mark a the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
+	/// Mark the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
 	pub fn seen_in(&mut self, relay_hash: Hash) {
 		self.seen_in.insert(relay_hash);
 	}
diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs
index 323b2cb08fecc37dc17bc1ef4bd570a00ab7f701..5d79260e3ad2579a88894170c596f9e670d2bd66 100644
--- a/polkadot/node/network/approval-distribution/src/tests.rs
+++ b/polkadot/node/network/approval-distribution/src/tests.rs
@@ -1255,7 +1255,7 @@ fn import_approval_happy_path_v1_v2_peers() {
 				}
 			);
 
-			// send the an approval from peer_b
+			// send an approval from peer_b
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices: candidate_index.into(),
@@ -1385,7 +1385,7 @@ fn import_approval_happy_path_v2() {
 				}
 			);
 
-			// send the an approval from peer_b
+			// send an approval from peer_b
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices,
@@ -1893,7 +1893,7 @@ fn import_approval_bad() {
 				.unwrap()
 				.unwrap();
 
-			// send the an approval from peer_b, we don't have an assignment yet
+			// send an approval from peer_b, we don't have an assignment yet
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices: candidate_index.into(),
@@ -4172,7 +4172,7 @@ fn import_versioned_approval() {
 				}
 			);
 
-			// send the an approval from peer_a
+			// send an approval from peer_a
 			let approval = IndirectSignedApprovalVote {
 				block_hash: hash,
 				candidate_index,
diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs
index 0cf16edc03cc9c79f6163d3c1249be6a65fe2605..5b814a22d2f877d2772790188ee81ba8877ae1cb 100644
--- a/polkadot/node/service/src/benchmarking.rs
+++ b/polkadot/node/service/src/benchmarking.rs
@@ -155,6 +155,7 @@ fn westend_sign_call(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 
@@ -171,6 +172,7 @@ fn westend_sign_call(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 
@@ -210,6 +212,7 @@ fn rococo_sign_call(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 
@@ -226,6 +229,7 @@ fn rococo_sign_call(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 227bc52539946eb95dee4ec7a1b01505460f8487..820cce8d083a63903d7887247990545bce114315 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -944,14 +944,9 @@ pub fn new_full<
 				secure_validator_mode,
 				prep_worker_path,
 				exec_worker_path,
-				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else(
-					|| match config.chain_spec.identify_chain() {
-						// The intention is to use this logic for gradual increasing from 2 to 4
-						// of this configuration chain by chain until it reaches production chain.
-						Chain::Polkadot | Chain::Kusama => 2,
-						Chain::Rococo | Chain::Westend | Chain::Unknown => 4,
-					},
-				),
+				// The default number of execution workers is 4 because the reference hardware has 8
+				// cores, and this accounts for 50% of that CPU capacity.
+				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4),
 				pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
 				pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
 			})
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index f34bb62a7cf082aa5b51e9f027bbabd7dcb2eaa2..75fd0d9af3013c944a7feaf85a884ba212f42f23 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -423,6 +423,7 @@ pub fn construct_extrinsic(
 		frame_system::CheckNonce::<Runtime>::from(nonce),
 		frame_system::CheckWeight::<Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+		frame_system::WeightReclaim::<Runtime>::new(),
 	)
 		.into();
 	let raw_payload = SignedPayload::from_raw(
@@ -437,6 +438,7 @@ pub fn construct_extrinsic(
 			(),
 			(),
 			(),
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| caller.sign(e));
diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs
index fdcb9fe8fb7e344f1574ff156cd2fe4462bf39ef..7fc4c5b5c3f1d4a21b3addd50d396c50e95bd77d 100644
--- a/polkadot/primitives/src/v8/mod.rs
+++ b/polkadot/primitives/src/v8/mod.rs
@@ -1900,7 +1900,7 @@ pub struct SessionInfo {
 	/// participating in parachain consensus. See
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
 	///
-	/// `SessionInfo::validators` will be limited to to `max_validators` when set.
+	/// `SessionInfo::validators` will be limited to `max_validators` when set.
 	pub validators: IndexedVec<ValidatorIndex, ValidatorId>,
 	/// Validators' authority discovery keys for the session in canonical ordering.
 	///
diff --git a/polkadot/roadmap/implementers-guide/src/architecture.md b/polkadot/roadmap/implementers-guide/src/architecture.md
index b75270662005005f4b0a1102b4d95b3d9af2dab0..e2be92e4cddbcc014c58ab1d8223c6c0d0b3e9fd 100644
--- a/polkadot/roadmap/implementers-guide/src/architecture.md
+++ b/polkadot/roadmap/implementers-guide/src/architecture.md
@@ -93,7 +93,7 @@ Runtime logic is divided up into Modules and APIs. Modules encapsulate particula
 consist of storage, routines, and entry-points. Routines are invoked by entry points, by other modules, upon block
 initialization or closing. Routines can read and alter the storage of the module. Entry-points are the means by which
 new information is introduced to a module and can limit the origins (user, root, parachain) that they accept being
-called by. Each block in the blockchain contains a set of Extrinsics. Each extrinsic targets a a specific entry point to
+called by. Each block in the blockchain contains a set of Extrinsics. Each extrinsic targets a specific entry point to
 trigger and which data should be passed to it. Runtime APIs provide a means for Node-side behavior to extract meaningful
 information from the state of a single fork.
 
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
index 40394412d81b0d47d1cc96b7a399662b63611176..7e155cdf7d58f56df71be4928afceeb191e5e264 100644
--- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -406,7 +406,7 @@ Some(core_index), response_sender)`
     * Construct a `IndirectSignedApprovalVote` using the information about the vote.
     * Dispatch `ApprovalDistributionMessage::DistributeApproval`.
   * ELSE
-    * Re-arm the timer with latest tick we have the send a the vote.
+    * Re-arm the timer with the latest tick we have, then send the vote.
 
 ### Determining Approval of Candidate
 
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
index b6aa16646ad25f339af3a6b3db9d7b2d65997c4d..25d4fa5dadaef84b940d84a9aba238528bc49caa 100644
--- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md
+++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
@@ -84,7 +84,7 @@ slashing risk for validator operators.
 
 In future, we shall determine which among the several hardening techniques best benefits the network as a whole.  We
 could provide a multi-process multi-machine architecture for validators, perhaps even reminiscent of GNUNet, or perhaps
-more resembling smart HSM tooling.  We might instead design a system that more resembled full systems, like like Cosmos'
+more resembling smart HSM tooling.  We might instead design a system that more resembled full systems, like Cosmos'
 sentry nodes.  In either case, approval assignments might be handled by a slightly hardened machine, but not necessarily
 nearly as hardened as approval votes, but approval votes machines must similarly run foreign WASM code, which increases
 their risk, so assignments being separate sounds helpful.
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
index 9fd44c00fa0a1c9ba7f5bb0d5abd54bdc55b8e4e..c2861b4035e76885b527e4e53864fe812767ed89 100644
--- a/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
+++ b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
@@ -111,7 +111,7 @@ checking (% for 30-ish malicious in a row).
 There are also censorship or liveness issues if backing is suddenly dominated by malicious nodes but in general even if
 some honest blocks get backed liveness should be preserved.
 
-> **Note:** It is worth noting that is is fundamentally a defense in depth strategy because if we assume disputes are
+> **Note:** It is worth noting that this is fundamentally a defense in depth strategy because if we assume disputes are
 > perfect it should not be a real concern. In reality disputes and determinism are difficult to get right, and
 > non-determinism and happen so defense in depth is crucial when handling those subsystems.
 
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/session_info.md b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
index fa7f55c4f0b41c42270fc4c5f7d6c826f903f20e..daf7e5c7fd80a1b6b438c8f1dc10c70a55db0999 100644
--- a/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
+++ b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
@@ -16,7 +16,7 @@ struct SessionInfo {
     /// in parachain consensus. See
     /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
     ///
-    /// `SessionInfo::validators` will be limited to to `max_validators` when set.
+    /// `SessionInfo::validators` will be limited to `max_validators` when set.
     validators: Vec<ValidatorId>,
     /// Validators' authority discovery keys for the session in canonical ordering.
     ///
diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs
index 07832bba18edb8120fddffbf3099e461f1fa5b1b..aed0729c9d517e820af013f3a9725db37d720939 100644
--- a/polkadot/runtime/common/src/paras_registrar/mod.rs
+++ b/polkadot/runtime/common/src/paras_registrar/mod.rs
@@ -561,15 +561,16 @@ impl<T: Config> Pallet<T> {
 		origin: <T as frame_system::Config>::RuntimeOrigin,
 		id: ParaId,
 	) -> DispatchResult {
-		ensure_signed(origin.clone())
-			.map_err(|e| e.into())
-			.and_then(|who| -> DispatchResult {
-				let para_info = Paras::<T>::get(id).ok_or(Error::<T>::NotRegistered)?;
+		if let Ok(who) = ensure_signed(origin.clone()) {
+			let para_info = Paras::<T>::get(id).ok_or(Error::<T>::NotRegistered)?;
+
+			if para_info.manager == who {
 				ensure!(!para_info.is_locked(), Error::<T>::ParaLocked);
-				ensure!(para_info.manager == who, Error::<T>::NotOwner);
-				Ok(())
-			})
-			.or_else(|_| -> DispatchResult { Self::ensure_root_or_para(origin, id) })
+				return Ok(())
+			}
+		}
+
+		Self::ensure_root_or_para(origin, id)
 	}
 
 	/// Ensure the origin is one of Root or the `para` itself.
@@ -577,14 +578,14 @@ impl<T: Config> Pallet<T> {
 		origin: <T as frame_system::Config>::RuntimeOrigin,
 		id: ParaId,
 	) -> DispatchResult {
-		if let Ok(caller_id) = ensure_parachain(<T as Config>::RuntimeOrigin::from(origin.clone()))
-		{
-			// Check if matching para id...
-			ensure!(caller_id == id, Error::<T>::NotOwner);
-		} else {
-			// Check if root...
-			ensure_root(origin.clone())?;
+		if ensure_root(origin.clone()).is_ok() {
+			return Ok(())
 		}
+
+		let caller_id = ensure_parachain(<T as Config>::RuntimeOrigin::from(origin))?;
+		// Check if matching para id...
+		ensure!(caller_id == id, Error::<T>::NotOwner);
+
 		Ok(())
 	}
 
diff --git a/polkadot/runtime/common/src/paras_registrar/tests.rs b/polkadot/runtime/common/src/paras_registrar/tests.rs
index 252de8f349da9d7f972fe609bded7a172db02801..66fef31c9afd8229d939b83a7e1df910364d597f 100644
--- a/polkadot/runtime/common/src/paras_registrar/tests.rs
+++ b/polkadot/runtime/common/src/paras_registrar/tests.rs
@@ -442,7 +442,7 @@ fn para_lock_works() {
 		// Owner cannot pass origin check when checking lock
 		assert_noop!(
 			mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id),
-			BadOrigin
+			Error::<Test>::ParaLocked,
 		);
 		// Owner cannot remove lock.
 		assert_noop!(mock::Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin);
diff --git a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
index 1dac3c92cf166cd30c2170d9d8fb142fad6af54f..ab95c5c2366a4d085e98393a58b0ed88495a8b89 100644
--- a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
@@ -14,6 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
+use bitvec::{bitvec, prelude::Lsb0};
+use frame_benchmarking::v2::*;
+use pallet_message_queue as mq;
+use polkadot_primitives::{
+	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments,
+	HrmpChannelId, OutboundHrmpMessage, SessionIndex,
+};
+
 use super::*;
 use crate::{
 	builder::generate_validator_pairs,
@@ -21,13 +29,6 @@ use crate::{
 	hrmp::{HrmpChannel, HrmpChannels},
 	initializer, HeadData, ValidationCode,
 };
-use bitvec::{bitvec, prelude::Lsb0};
-use frame_benchmarking::benchmarks;
-use pallet_message_queue as mq;
-use polkadot_primitives::{
-	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments,
-	HrmpChannelId, OutboundHrmpMessage, SessionIndex,
-};
 
 fn create_candidate_commitments<T: crate::hrmp::pallet::Config>(
 	para_id: ParaId,
@@ -70,7 +71,7 @@ fn create_candidate_commitments<T: crate::hrmp::pallet::Config>(
 		BoundedVec::truncate_from(unbounded)
 	};
 
-	let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42u8; 1024]));
+	let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42_u8; 1024]));
 
 	CandidateCommitments::<u32> {
 		upward_messages,
@@ -87,18 +88,13 @@ fn create_messages(msg_len: usize, n_msgs: usize) -> Vec<Vec<u8>> {
 	vec![vec![best_number; msg_len]; n_msgs]
 }
 
-benchmarks! {
-	where_clause {
-		where
-			T: mq::Config + configuration::Config + initializer::Config,
-	}
-
-	enact_candidate {
-		let u in 0 .. 2;
-		let h in 0 .. 2;
-		let c in 0 .. 1;
+#[benchmarks(where T: mq::Config + configuration::Config + initializer::Config)]
+mod benchmarks {
+	use super::*;
 
-		let para = 42_u32.into();	// not especially important.
+	#[benchmark]
+	fn enact_candidate(u: Linear<0, 2>, h: Linear<0, 2>, c: Linear<0, 1>) {
+		let para = 42_u32.into(); // not especially important.
 
 		let max_len = mq::MaxMessageLenOf::<T>::get() as usize;
 
@@ -106,7 +102,7 @@ benchmarks! {
 		let n_validators = config.max_validators.unwrap_or(500);
 		let validators = generate_validator_pairs::<T>(n_validators);
 
-		let session = SessionIndex::from(0u32);
+		let session = SessionIndex::from(0_u32);
 		initializer::Pallet::<T>::test_trigger_on_new_session(
 			false,
 			session,
@@ -116,7 +112,7 @@ benchmarks! {
 		let backing_group_size = config.scheduler_params.max_validators_per_core.unwrap_or(5);
 		let head_data = HeadData(vec![0xFF; 1024]);
 
-		let relay_parent_number = BlockNumberFor::<T>::from(10u32);
+		let relay_parent_number = BlockNumberFor::<T>::from(10_u32);
 		let commitments = create_candidate_commitments::<T>(para, head_data, max_len, u, h, c != 0);
 		let backers = bitvec![u8, Lsb0; 1; backing_group_size as usize];
 		let availability_votes = bitvec![u8, Lsb0; 1; n_validators as usize];
@@ -135,17 +131,26 @@ benchmarks! {
 			ValidationCode(vec![1, 2, 3]).hash(),
 		);
 
-		let receipt = CommittedCandidateReceipt::<T::Hash> {
-			descriptor,
-			commitments,
-		};
+		let receipt = CommittedCandidateReceipt::<T::Hash> { descriptor, commitments };
 
-		Pallet::<T>::receive_upward_messages(para, vec![vec![0; max_len]; 1].as_slice());
-	} : { Pallet::<T>::enact_candidate(relay_parent_number, receipt, backers, availability_votes, core_index, backing_group) }
+		Pallet::<T>::receive_upward_messages(para, &vec![vec![0; max_len]; 1]);
 
-	impl_benchmark_test_suite!(
+		#[block]
+		{
+			Pallet::<T>::enact_candidate(
+				relay_parent_number,
+				receipt,
+				backers,
+				availability_votes,
+				core_index,
+				backing_group,
+			);
+		}
+	}
+
+	impl_benchmark_test_suite! {
 		Pallet,
 		crate::mock::new_test_ext(Default::default()),
 		crate::mock::Test
-	);
+	}
 }
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index da4f039624a3da6ffa0b09efbdd82537092a53b1..e5d703700fee57cb99d33bf1298ebafab7bd10f7 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -674,6 +674,7 @@ where
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
 			frame_metadata_hash_extension::CheckMetadataHash::new(true),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -792,6 +793,7 @@ impl pallet_recovery::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type WeightInfo = ();
 	type RuntimeCall = RuntimeCall;
+	type BlockNumberProvider = System;
 	type Currency = Balances;
 	type ConfigDepositBase = ConfigDepositBase;
 	type FriendDepositFactor = FriendDepositFactor;
@@ -1616,6 +1618,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
@@ -2273,7 +2276,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
index 99dac1ba75f06f8d8e5e9b835a48f3b5f2974d90..88596a37cc01bbb00d630e84d761e35419b51369 100644
--- a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
+++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
@@ -17,25 +17,23 @@
 //! Autogenerated weights for `frame_system_extensions`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --chain=rococo-dev
 // --steps=50
 // --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=frame_system_extensions
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=frame_system_extensions
+// --chain=rococo-dev
 // --header=./polkadot/file_header.txt
 // --output=./polkadot/runtime/rococo/src/weights/
 
@@ -50,45 +48,36 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_262_000 picoseconds.
-		Weight::from_parts(3_497_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `30`
+		//  Estimated: `0`
+		// Minimum execution time: 3_528_000 picoseconds.
+		Weight::from_parts(3_657_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_416_000 picoseconds.
-		Weight::from_parts(5_690_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_456_000 picoseconds.
+		Weight::from_parts(6_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_416_000 picoseconds.
-		Weight::from_parts(5_690_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_210_000 picoseconds.
+		Weight::from_parts(6_581_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(552_000, 0)
+		// Minimum execution time: 529_000 picoseconds.
+		Weight::from_parts(561_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::Account` (r:1 w:1)
@@ -97,8 +86,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 4_847_000 picoseconds.
-		Weight::from_parts(5_091_000, 0)
+		// Minimum execution time: 6_935_000 picoseconds.
+		Weight::from_parts(7_264_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -107,28 +96,32 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 388_000 picoseconds.
-		Weight::from_parts(421_000, 0)
+		// Minimum execution time: 452_000 picoseconds.
+		Weight::from_parts(474_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 378_000 picoseconds.
-		Weight::from_parts(440_000, 0)
+		// Minimum execution time: 422_000 picoseconds.
+		Weight::from_parts(460_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 3_402_000 picoseconds.
-		Weight::from_parts(3_627_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_632_000 picoseconds.
+		Weight::from_parts(3_784_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_209_000 picoseconds.
+		Weight::from_parts(2_335_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs
index a237d8cd36565c00803a66e5a873d166f284a554..10c3f6c0cbfcf2a25efc02524e46f6c4d256674d 100644
--- a/polkadot/runtime/rococo/src/xcm_config.rs
+++ b/polkadot/runtime/rococo/src/xcm_config.rs
@@ -85,7 +85,7 @@ pub type LocalAssetTransactor = FungibleAdapter<
 	LocalCheckAccount,
 >;
 
-/// The means that we convert an the XCM message origin location into a local dispatch origin.
+/// The means by which we convert the XCM message origin location into a local dispatch origin.
 type LocalOriginConverter = (
 	// A `Signed` origin of the sovereign account that the original location controls.
 	SovereignSignedViaLocation<LocationConverter, RuntimeOrigin>,
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index d4031f7ac57ae0b5fac0884405e709a8409e176d..4f9ba8d8508cdecc904efefcde280e386fddc1d3 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -443,6 +443,7 @@ where
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -834,6 +835,7 @@ pub type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -1184,7 +1186,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index cbf2e02ce428d00b66be3a86d690235c7c24080e..9d77a5e5eea1f33b13c59762fe97f4f5e5d70a97 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -923,6 +923,7 @@ where
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(true),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -1018,6 +1019,7 @@ impl pallet_recovery::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type WeightInfo = ();
 	type RuntimeCall = RuntimeCall;
+	type BlockNumberProvider = System;
 	type Currency = Balances;
 	type ConfigDepositBase = ConfigDepositBase;
 	type FriendDepositFactor = FriendDepositFactor;
@@ -1813,6 +1815,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 parameter_types! {
@@ -2297,7 +2300,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
index 048f23fbcb91329eadfd721dd2ae63e1f218a89e..75f4f6d00b562dc589173c003b0e325e77f1b65f 100644
--- a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
+++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
@@ -17,24 +17,25 @@
 //! Autogenerated weights for `frame_system_extensions`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/debug/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --steps=2
-// --repeat=2
+// --steps=50
+// --repeat=20
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --pallet=frame-system-extensions
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=frame_system_extensions
 // --chain=westend-dev
-// --output=./polkadot/runtime/westend/src/weights/
 // --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,45 +48,36 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 75_764_000 picoseconds.
-		Weight::from_parts(85_402_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `30`
+		//  Estimated: `0`
+		// Minimum execution time: 3_357_000 picoseconds.
+		Weight::from_parts(3_484_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 118_233_000 picoseconds.
-		Weight::from_parts(126_539_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_242_000 picoseconds.
+		Weight::from_parts(6_566_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 118_233_000 picoseconds.
-		Weight::from_parts(126_539_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_268_000 picoseconds.
+		Weight::from_parts(6_631_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_885_000 picoseconds.
-		Weight::from_parts(12_784_000, 0)
+		// Minimum execution time: 567_000 picoseconds.
+		Weight::from_parts(617_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::Account` (r:1 w:1)
@@ -94,8 +86,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 104_237_000 picoseconds.
-		Weight::from_parts(110_910_000, 0)
+		// Minimum execution time: 6_990_000 picoseconds.
+		Weight::from_parts(7_343_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -104,28 +96,32 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_141_000 picoseconds.
-		Weight::from_parts(11_502_000, 0)
+		// Minimum execution time: 422_000 picoseconds.
+		Weight::from_parts(475_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_192_000 picoseconds.
-		Weight::from_parts(11_481_000, 0)
+		// Minimum execution time: 434_000 picoseconds.
+		Weight::from_parts(519_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 87_616_000 picoseconds.
-		Weight::from_parts(93_607_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_524_000 picoseconds.
+		Weight::from_parts(3_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_216_000 picoseconds.
+		Weight::from_parts(2_337_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs
index 3d68d8ed16ae1459f7ac2dd39f1df80ecb44131e..e23412a97ebcb3e6f2999d0f48d555e53d02503e 100644
--- a/polkadot/xcm/xcm-builder/src/lib.rs
+++ b/polkadot/xcm/xcm-builder/src/lib.rs
@@ -132,11 +132,13 @@ pub use routing::{
 mod transactional;
 pub use transactional::FrameTransactionalProcessor;
 
+#[allow(deprecated)]
+pub use universal_exports::UnpaidLocalExporter;
 mod universal_exports;
 pub use universal_exports::{
 	ensure_is_remote, BridgeBlobDispatcher, BridgeMessage, DispatchBlob, DispatchBlobError,
-	ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, NetworkExportTable,
-	NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter,
+	ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, LocalExporter, NetworkExportTable,
+	NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidRemoteExporter,
 };
 
 mod weight;
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
index ea584bf9d485a25c38ce0b673259b6ed8df7368f..5e930fe575c2b07c3c8d172f478d7f20ff519e76 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
@@ -28,7 +28,7 @@ parameter_types! {
 type TheBridge =
 	TestBridge<BridgeBlobDispatcher<TestRemoteIncomingRouter, RemoteUniversalLocation, ()>>;
 type Router = TestTopic<
-	UnpaidLocalExporter<
+	LocalExporter<
 		HaulBlobExporter<TheBridge, RemoteNetwork, AlwaysLatest, Price>,
 		UniversalLocation,
 	>,
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
index 38ffe2532d580a017efc21043e7313028700c046..a41f09721812a99f71fdcb58345e265472b163c6 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
@@ -28,7 +28,7 @@ parameter_types! {
 type TheBridge =
 	TestBridge<BridgeBlobDispatcher<TestRemoteIncomingRouter, RemoteUniversalLocation, ()>>;
 type Router = TestTopic<
-	UnpaidLocalExporter<
+	LocalExporter<
 		HaulBlobExporter<TheBridge, RemoteNetwork, AlwaysLatest, Price>,
 		UniversalLocation,
 	>,
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
index 767575e7f2dd9efa29cc1441a8cc2bf2cdaf3d19..90ad9921d65a13b8b5590507d0a7c36ba79b063a 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
@@ -209,7 +209,7 @@ impl<Local: Get<Junctions>, Remote: Get<Junctions>, RemoteExporter: ExportXcm> S
 		let origin = Local::get().relative_to(&Remote::get());
 		AllowUnpaidFrom::set(vec![origin.clone()]);
 		set_exporter_override(price::<RemoteExporter>, deliver::<RemoteExporter>);
-		// The we execute it:
+		// Then we execute it:
 		let mut id = fake_id();
 		let outcome = XcmExecutor::<TestConfig>::prepare_and_execute(
 			origin,
diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
index 26ea226313f068d9203bd8057790d3665bae2d11..6ebf6476f7e570f15a37908c40ea53c9810b2875 100644
--- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
@@ -37,6 +37,7 @@ pub type TxExtension = (
 	frame_system::CheckMortality<Test>,
 	frame_system::CheckNonce<Test>,
 	frame_system::CheckWeight<Test>,
+	frame_system::WeightReclaim<Test>,
 );
 pub type Address = sp_runtime::MultiAddress<AccountId, AccountIndex>;
 pub type UncheckedExtrinsic =
diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs
index 6b3c3adf737dbb6e938d59383c5100041c493d52..e215aea3ab6858a0bcecd5b952cb4fd1384f52a9 100644
--- a/polkadot/xcm/xcm-builder/src/universal_exports.rs
+++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs
@@ -16,6 +16,8 @@
 
 //! Traits and utilities to help with origin mutation and bridging.
 
+#![allow(deprecated)]
+
 use crate::InspectMessageQueues;
 use alloc::{vec, vec::Vec};
 use codec::{Decode, Encode};
@@ -58,6 +60,8 @@ pub fn ensure_is_remote(
 /// that the message sending cannot be abused in any way.
 ///
 /// This is only useful when the local chain has bridging capabilities.
+#[deprecated(note = "Will be removed after July 2025; It uses hard-coded channel `0`, \
+	use `xcm_builder::LocalExporter` directly instead.")]
 pub struct UnpaidLocalExporter<Exporter, UniversalLocation>(
 	PhantomData<(Exporter, UniversalLocation)>,
 );
@@ -100,6 +104,54 @@ impl<Exporter: ExportXcm, UniversalLocation: Get<InteriorLocation>> SendXcm
 	fn ensure_successful_delivery(_: Option<Location>) {}
 }
 
+/// Implementation of `SendXcm` which uses the given `ExportXcm` implementation in order to forward
+/// the message over a bridge.
+///
+/// This is only useful when the local chain has bridging capabilities.
+pub struct LocalExporter<Exporter, UniversalLocation>(PhantomData<(Exporter, UniversalLocation)>);
+impl<Exporter: ExportXcm, UniversalLocation: Get<InteriorLocation>> SendXcm
+	for LocalExporter<Exporter, UniversalLocation>
+{
+	type Ticket = Exporter::Ticket;
+
+	fn validate(
+		dest: &mut Option<Location>,
+		msg: &mut Option<Xcm<()>>,
+	) -> SendResult<Exporter::Ticket> {
+		// This `clone` ensures that `dest` is not consumed in any case.
+		let d = dest.clone().take().ok_or(MissingArgument)?;
+		let universal_source = UniversalLocation::get();
+		let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?;
+		let (remote_network, remote_location) = devolved;
+		let xcm = msg.take().ok_or(MissingArgument)?;
+
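+		// Derive a deterministic channel identifier from the (local, remote location) pair rather
+		// than the hard-coded channel `0` used by the deprecated `UnpaidLocalExporter`.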
+		let hash =
+			(Some(Location::here()), &remote_location).using_encoded(sp_io::hashing::blake2_128);
+		let channel = u32::decode(&mut hash.as_ref()).unwrap_or(0);
+
+		validate_export::<Exporter>(
+			remote_network,
+			channel,
+			universal_source,
+			remote_location,
+			xcm.clone(),
+		)
+		.inspect_err(|err| {
+			if let NotApplicable = err {
+				// We need to make sure that msg is not consumed in case of `NotApplicable`.
+				*msg = Some(xcm);
+			}
+		})
+	}
+
+	fn deliver(ticket: Exporter::Ticket) -> Result<XcmHash, SendError> {
+		Exporter::deliver(ticket)
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful_delivery(_: Option<Location>) {}
+}
+
 pub trait ExporterFor {
 	/// Return the locally-routable bridge (if any) capable of forwarding `message` to the
 	/// `remote_location` on the remote `network`, together with the payment which is required.
@@ -703,9 +755,9 @@ mod tests {
 		let local_dest: Location = (Parent, Parachain(5678)).into();
 		assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err());
 
-		// UnpaidLocalExporter
+		// LocalExporter
 		ensure_validate_does_not_consume_dest_or_msg::<
-			UnpaidLocalExporter<RoutableBridgeExporter, UniversalLocation>,
+			LocalExporter<RoutableBridgeExporter, UniversalLocation>,
 		>(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result));
 
 		// 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`)
@@ -713,14 +765,14 @@ mod tests {
 			(Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into();
 		assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok());
 
-		// UnpaidLocalExporter
+		// LocalExporter
 		ensure_validate_does_not_consume_dest_or_msg::<
-			UnpaidLocalExporter<NotApplicableBridgeExporter, UniversalLocation>,
+			LocalExporter<NotApplicableBridgeExporter, UniversalLocation>,
 		>(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result));
 
 		// 3. Ok - deliver
 		// UnpaidRemoteExporter
-		assert_ok!(send_xcm::<UnpaidLocalExporter<RoutableBridgeExporter, UniversalLocation>>(
+		assert_ok!(send_xcm::<LocalExporter<RoutableBridgeExporter, UniversalLocation>>(
 			remote_dest,
 			Xcm::default()
 		));
diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
index fb5d1ae7c0e5ad92a77f201869ac9b4e5da09698..56a77094f177433cf23f32bf943ba2257f932df1 100644
--- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
+++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
@@ -60,7 +60,8 @@ construct_runtime! {
 	}
 }
 
-pub type TxExtension = (frame_system::CheckWeight<TestRuntime>,);
+pub type TxExtension =
+	(frame_system::CheckWeight<TestRuntime>, frame_system::WeightReclaim<TestRuntime>);
 
 // we only use the hash type from this, so using the mock should be fine.
 pub(crate) type Extrinsic = sp_runtime::generic::UncheckedExtrinsic<
diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml
index 120857c9a42e22cb1634994694915cc143e1b0cf..ba7517ddce6631df2b56f3c9632c3ec7f55c5b6f 100644
--- a/polkadot/zombienet-sdk-tests/Cargo.toml
+++ b/polkadot/zombienet-sdk-tests/Cargo.toml
@@ -12,6 +12,7 @@ anyhow = { workspace = true }
 codec = { workspace = true, features = ["derive"] }
 env_logger = { workspace = true }
 log = { workspace = true }
+polkadot-primitives = { workspace = true, default-features = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 subxt = { workspace = true, features = ["substrate-compat"] }
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
new file mode 100644
index 0000000000000000000000000000000000000000..42aa83d9da7a22c2e21b9020cade89cf7c89c8ca
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
@@ -0,0 +1,135 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that a parachain that uses a basic collator (like adder-collator) with elastic scaling
+// can achieve full throughput of 3 candidates per block.
+
+use anyhow::anyhow;
+
+use crate::helpers::{
+	assert_para_throughput, rococo,
+	rococo::runtime_types::{
+		pallet_broker::coretime_interface::CoreAssignment,
+		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
+	},
+};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use subxt_signer::sr25519::dev;
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn basic_3cores_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"num_cores": 2,
+								"max_validators_per_core": 1
+							},
+							"async_backing_params": {
+								"max_candidate_depth": 6,
+								"allowed_ancestry_len": 2
+							}
+						}
+					}
+				}))
+				// Have to set a `with_node` outside of the loop below, so that `r` has the right
+				// type.
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("adder-collator")
+				.cumulus_based(false)
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("adder-2000"))
+		})
+		.with_parachain(|p| {
+			p.with_id(2001)
+				.with_default_command("adder-collator")
+				.cumulus_based(false)
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("adder-2001"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+	let alice = dev::alice();
+
+	// Assign two extra cores to adder-2000.
+	relay_client
+		.tx()
+		.sign_and_submit_then_watch_default(
+			&rococo::tx()
+				.sudo()
+				.sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility(
+					rococo::runtime_types::pallet_utility::pallet::Call::batch {
+						calls: vec![
+							rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+								rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+									core: 0,
+									begin: 0,
+									assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+									end_hint: None
+								}
+							),
+							rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+								rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+									core: 1,
+									begin: 0,
+									assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+									end_hint: None
+								}
+							),
+						],
+					},
+				)),
+			&alice,
+		)
+		.await?
+		.wait_for_finalized_success()
+		.await?;
+
+	log::info!("2 more cores assigned to adder-2000");
+
+	assert_para_throughput(
+		&relay_client,
+		15,
+		[(ParaId::from(2000), 40..46), (ParaId::from(2001), 12..16)]
+			.into_iter()
+			.collect(),
+	)
+	.await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f83400d2b22abe7418d5e022b28d773ca04e790b
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
@@ -0,0 +1,133 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that a parachain that doesn't use elastic scaling but has acquired multiple cores does not
+// brick itself if the ElasticScalingMVP feature is enabled in genesis.
+
+use anyhow::anyhow;
+
+use crate::helpers::{
+	assert_finalized_block_height, assert_para_throughput, rococo,
+	rococo::runtime_types::{
+		pallet_broker::coretime_interface::CoreAssignment,
+		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
+	},
+};
+use polkadot_primitives::{CoreIndex, Id as ParaId};
+use serde_json::json;
+use std::collections::{BTreeMap, VecDeque};
+use subxt::{OnlineClient, PolkadotConfig};
+use subxt_signer::sr25519::dev;
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"num_cores": 1,
+								"max_validators_per_core": 2
+							},
+							"async_backing_params": {
+								"max_candidate_depth": 6,
+								"allowed_ancestry_len": 2
+							}
+						}
+					}
+				}))
+				// Have to set a `with_node` outside of the loop below, so that `r` has the right
+				// type.
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			// Use the rococo-parachain default, which has a 6-second slot time. Also, don't use the
+			// slot-based collator.
+			p.with_id(2000)
+				.with_default_command("polkadot-parachain")
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2000"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node = network.get_node("collator-2000")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+	let alice = dev::alice();
+
+	relay_client
+		.tx()
+		.sign_and_submit_then_watch_default(
+			&rococo::tx()
+				.sudo()
+				.sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+                    rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+                        core: 0,
+                        begin: 0,
+                        assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+                        end_hint: None
+                    }
+                )),
+			&alice,
+		)
+		.await?
+		.wait_for_finalized_success()
+		.await?;
+
+	log::info!("1 more core assigned to the parachain");
+
+	let para_id = ParaId::from(2000);
+	// Expect the parachain to be making normal progress, 1 candidate backed per relay chain block.
+	assert_para_throughput(&relay_client, 15, [(para_id, 13..16)].into_iter().collect()).await?;
+
+	let para_client = para_node.wait_client().await?;
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_client, 12..16).await?;
+
+	// Sanity check that indeed the parachain has two assigned cores.
+	let cq = relay_client
+		.runtime_api()
+		.at_latest()
+		.await?
+		.call_raw::<BTreeMap<CoreIndex, VecDeque<ParaId>>>("ParachainHost_claim_queue", None)
+		.await?;
+
+	assert_eq!(
+		cq,
+		[
+			(CoreIndex(0), [para_id, para_id].into_iter().collect()),
+			(CoreIndex(1), [para_id, para_id].into_iter().collect()),
+		]
+		.into_iter()
+		.collect()
+	);
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
index bb296a419df1f2387d7ff44d4f71769eaadd0467..9cfd5db5a096dbd393f84ea4fff7bf2b1df59227 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
@@ -1,8 +1,6 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
-#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
-pub mod rococo {}
-
-mod helpers;
+mod basic_3cores;
+mod doesnt_break_parachains;
 mod slot_based_3cores;
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
index 41ec1250ecc4479f6d0cca4ff61c49b5bc33b901..aa9f41320135defd9a85e9ba3c5dafa623a3187b 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
@@ -6,14 +6,14 @@
 
 use anyhow::anyhow;
 
-use super::{
-	helpers::assert_para_throughput,
-	rococo,
+use crate::helpers::{
+	assert_finalized_block_height, assert_para_throughput, rococo,
 	rococo::runtime_types::{
 		pallet_broker::coretime_interface::CoreAssignment,
 		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
 	},
 };
+use polkadot_primitives::Id as ParaId;
 use serde_json::json;
 use subxt::{OnlineClient, PolkadotConfig};
 use subxt_signer::sr25519::dev;
@@ -63,7 +63,6 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 				.with_default_command("test-parachain")
 				.with_default_image(images.cumulus.as_str())
 				.with_chain("elastic-scaling-mvp")
-				.with_default_args(vec![("--experimental-use-slot-based").into()])
 				.with_default_args(vec![
 					("--experimental-use-slot-based").into(),
 					("-lparachain=debug,aura=debug").into(),
@@ -93,6 +92,8 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 	let network = spawn_fn(config).await?;
 
 	let relay_node = network.get_node("validator-0")?;
+	let para_node_elastic = network.get_node("collator-elastic")?;
+	let para_node_elastic_mvp = network.get_node("collator-elastic-mvp")?;
 
 	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
 	let alice = dev::alice();
@@ -156,10 +157,17 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 	assert_para_throughput(
 		&relay_client,
 		15,
-		[(2100, 39..46), (2200, 39..46)].into_iter().collect(),
+		[(ParaId::from(2100), 39..46), (ParaId::from(2200), 39..46)]
+			.into_iter()
+			.collect(),
 	)
 	.await?;
 
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_node_elastic.wait_client().await?, 36..46).await?;
+	assert_finalized_block_height(&para_node_elastic_mvp.wait_client().await?, 36..46).await?;
+
 	log::info!("Test finished successfully");
 
 	Ok(())
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
new file mode 100644
index 0000000000000000000000000000000000000000..14f86eb130f78e24796db6280f17fcb430d604ae
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
@@ -0,0 +1,95 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that we are producing parachain blocks at a 6-second rate with async backing enabled.
+
+use anyhow::anyhow;
+
+use crate::helpers::{assert_finalized_block_height, assert_para_throughput};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"group_rotation_frequency": 4,
+								"lookahead": 2,
+								"max_candidate_depth": 3,
+								"allowed_ancestry_len": 2,
+							},
+						}
+					}
+				}))
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..12)
+				.fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("adder-collator")
+				.with_default_image(
+					std::env::var("COL_IMAGE")
+						.unwrap_or("docker.io/paritypr/colander:latest".to_string())
+						.as_str(),
+				)
+				.cumulus_based(false)
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("collator-adder-2000"))
+		})
+		.with_parachain(|p| {
+			p.with_id(2001)
+				.with_default_command("polkadot-parachain")
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2001"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node_2001 = network.get_node("collator-2001")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+
+	assert_para_throughput(
+		&relay_client,
+		15,
+		[(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)]
+			.into_iter()
+			.collect(),
+	)
+	.await?;
+
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates. We can only do this for the cumulus-based collator.
+	assert_finalized_block_height(&para_node_2001.wait_client().await?, 10..16).await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ecdab38e1d2865faaa6957c5f2ce331dc96d61df
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
@@ -0,0 +1,5 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+mod async_backing_6_seconds_rate;
+mod sync_backing;
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6da45e2844919ea924317d2a65ced7e57e7c8060
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that we are producing 12-second parachain blocks when using an old collator, pre async backing.
+
+use anyhow::anyhow;
+
+use crate::helpers::{assert_finalized_block_height, assert_para_throughput};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn sync_backing_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"group_rotation_frequency": 4,
+							},
+						}
+					}
+				}))
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..5).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("polkadot-parachain")
+				// This must be a very old polkadot-parachain image, pre async backing
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2000"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node = network.get_node("collator-2000")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+
+	assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)].into_iter().collect())
+		.await?;
+
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_node.wait_client().await?, 5..9).await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
similarity index 65%
rename from polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs
rename to polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
index 7d4ad4a1dd8b1a05738e5dd9a9ef06f55ad26be3..470345ca4d621fb87100b9c783098d288b22cdb6 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs
+++ b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
@@ -1,19 +1,22 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::rococo;
+use polkadot_primitives::Id as ParaId;
 use std::{collections::HashMap, ops::Range};
 use subxt::{OnlineClient, PolkadotConfig};
 
+#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
+pub mod rococo {}
+
 // Helper function for asserting the throughput of parachains (total number of backed candidates in
 // a window of relay chain blocks), after the first session change.
 pub async fn assert_para_throughput(
 	relay_client: &OnlineClient<PolkadotConfig>,
 	stop_at: u32,
-	expected_candidate_ranges: HashMap<u32, Range<u32>>,
+	expected_candidate_ranges: HashMap<ParaId, Range<u32>>,
 ) -> Result<(), anyhow::Error> {
 	let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?;
-	let mut candidate_count: HashMap<u32, u32> = HashMap::new();
+	let mut candidate_count: HashMap<ParaId, u32> = HashMap::new();
 	let mut current_block_count = 0;
 	let mut had_first_session_change = false;
 
@@ -31,7 +34,7 @@ pub async fn assert_para_throughput(
 			current_block_count += 1;
 
 			for event in events.find::<rococo::para_inclusion::events::CandidateBacked>() {
-				*(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1;
+				*(candidate_count.entry(event?.0.descriptor.para_id.0.into()).or_default()) += 1;
 			}
 		}
 
@@ -58,3 +61,21 @@ pub async fn assert_para_throughput(
 
 	Ok(())
 }
+
+// Helper function for retrieving the latest finalized block height and asserting it's within a
+// range.
+pub async fn assert_finalized_block_height(
+	client: &OnlineClient<PolkadotConfig>,
+	expected_range: Range<u32>,
+) -> Result<(), anyhow::Error> {
+	if let Some(block) = client.blocks().subscribe_finalized().await?.next().await {
+		let height = block?.number();
+		log::info!("Finalized block number {height}");
+
+		assert!(
+			expected_range.contains(&height),
+			"Finalized block number {height} not within range {expected_range:?}"
+		);
+	}
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs
index 977e0f90b1c94ad333fa02baa23b10a4132bde69..9feb9775e450e0149b0e357c4861a62beff5732e 100644
--- a/polkadot/zombienet-sdk-tests/tests/lib.rs
+++ b/polkadot/zombienet-sdk-tests/tests/lib.rs
@@ -1,7 +1,12 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
+#[cfg(feature = "zombie-metadata")]
+mod helpers;
+
 #[cfg(feature = "zombie-metadata")]
 mod elastic_scaling;
 #[cfg(feature = "zombie-metadata")]
+mod functional;
+#[cfg(feature = "zombie-metadata")]
 mod smoke;
diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
index 2da2436a1111221fff1aeb2fb57b09798ea37880..59a71a83e01ecf91bbad9b5358caa0bc08f0cd0e 100644
--- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
+++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
@@ -10,21 +10,24 @@
 //! normal parachain runtime WILL mess things up.
 
 use anyhow::anyhow;
-#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
-pub mod rococo {}
 
 #[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")]
 mod coretime_rococo {}
 
-use rococo::runtime_types::{
-	staging_xcm::v4::{
-		asset::{Asset, AssetId, Assets, Fungibility},
-		junction::Junction,
-		junctions::Junctions,
-		location::Location,
+use crate::helpers::rococo::{
+	self as rococo_api,
+	runtime_types::{
+		polkadot_parachain_primitives::primitives,
+		staging_xcm::v4::{
+			asset::{Asset, AssetId, Assets, Fungibility},
+			junction::Junction,
+			junctions::Junctions,
+			location::Location,
+		},
+		xcm::{VersionedAssets, VersionedLocation},
 	},
-	xcm::{VersionedAssets, VersionedLocation},
 };
+
 use serde_json::json;
 use std::{fmt::Display, sync::Arc};
 use subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig};
@@ -41,8 +44,6 @@ use coretime_rococo::{
 	},
 };
 
-use rococo::{self as rococo_api, runtime_types::polkadot_parachain_primitives::primitives};
-
 type CoretimeRuntimeCall = coretime_api::runtime_types::coretime_rococo_runtime::RuntimeCall;
 type CoretimeUtilityCall = coretime_api::runtime_types::pallet_utility::pallet::Call;
 type CoretimeBrokerCall = coretime_api::runtime_types::pallet_broker::pallet::Call;
diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml
deleted file mode 100644
index 611978a33a5f145274dd3c6c158e0de69a1c436a..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml
+++ /dev/null
@@ -1,49 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 6
-  allowed_ancestry_len = 2
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  max_validators_per_core = 1
-  num_cores = 3
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
-  max_approval_coalesce_count = 5
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "3" }
-  requests = { memory = "4G", cpu = "3" }
-
-  [[relaychain.node_groups]]
-  name = "elastic-validator"
-  count = 5
-  args = [ "-lparachain=debug,parachain::candidate-backing=trace,parachain::provisioner=trace,parachain::prospective-parachains=trace,runtime=debug"]
-
-{% for id in range(2000,2002) %}
-[[parachains]]
-id = {{id}}
-addToGenesis = true
-    [parachains.default_resources]
-    limits = { memory = "4G", cpu = "3" }
-    requests = { memory = "4G", cpu = "3" }
-
-    [parachains.collator]
-    name = "some-parachain"
-    image = "{{COL_IMAGE}}"
-    command = "adder-collator"
-    args = ["-lparachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug"]
-
-{% endfor %}
-
-# This represents the layout of the adder collator block header.
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl
deleted file mode 100644
index d47ef8f415f7ac9ca94b825de23580ab6131f013..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl
+++ /dev/null
@@ -1,28 +0,0 @@
-Description: Test with adder collator using 3 cores and async backing
-Network: ./0001-basic-3cores-6s-blocks.toml
-Creds: config
-
-# Check authority status.
-elastic-validator-0: reports node_roles is 4
-elastic-validator-1: reports node_roles is 4
-elastic-validator-2: reports node_roles is 4
-elastic-validator-3: reports node_roles is 4
-elastic-validator-4: reports node_roles is 4
-
-
-# Register 2 extra cores to this some-parachain.
-elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds
-elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds
-
-# Wait for 20 relay chain blocks 
-elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds
-
-# Non elastic parachain should progress normally
-some-parachain-1: count of log lines containing "Parachain velocity: 1" is at least 5 within 20 seconds
-# Sanity
-some-parachain-1: count of log lines containing "Parachain velocity: 2" is 0
-
-# Parachain should progress 3 blocks per relay chain block ideally, however CI might not be
-# the most performant environment so we'd just use a lower bound of 2 blocks per RCB
-elastic-validator-0: parachain 2000 block height is at least 20 within 200 seconds
-
diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml
deleted file mode 100644
index 046d707cc1e8db0f08e7870b663e715f3c5f9420..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[settings]
-timeout = 1000
-bootnode = true
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  needed_approvals = 4
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  max_validators_per_core = 2
-  num_cores = 2
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  validator = "true"
-
-  [[relaychain.node_groups]]
-  name = "validator"
-  count = 3
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 2000
-default_command = "polkadot-parachain"
-add_to_genesis = false
-register_para = true
-onboard_as_parachain = false
-
-  [parachains.collator]
-  name = "collator2000"
-  command = "polkadot-parachain"
-  args = [ "-lparachain=debug", "--experimental-use-slot-based" ]
diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl
deleted file mode 100644
index 0cfc29f532d1e433bd1b5879c6cea93eb94989bf..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled in genesis
-Network: ./0002-elastic-scaling-doesnt-break-parachains.toml
-Creds: config
-
-# Check authority status.
-validator: reports node_roles is 4
-
-validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# Ensure parachain was able to make progress.
-validator: parachain 2000 block height is at least 10 within 200 seconds
-
-# Register the second core assigned to this parachain.
-alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds
-alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds
-
-validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds
-
-# Ensure parachain is now making progress.
-validator: parachain 2000 block height is at least 30 within 200 seconds
diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js
deleted file mode 120000
index eeb6402c06f5e52cedf150f924d6791beb1d9867..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js
+++ /dev/null
@@ -1 +0,0 @@
-../assign-core.js
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
deleted file mode 100644
index b776622fdce33df2e9a78debc31ee3e62ae4805d..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
+++ /dev/null
@@ -1,54 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  needed_approvals = 4
-  relay_vrf_modulo_samples = 6
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 3
-  allowed_ancestry_len = 2
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  lookahead = 2
-  group_rotation_frequency = 4
-
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.node_groups]]
-  name = "alice"
-  args = [ "-lparachain=debug" ]
-  count = 12
-
-[[parachains]]
-id = 2000
-addToGenesis = true
-genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1"
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"]
-
-[[parachains]]
-id = 2001
-cumulus_based = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{CUMULUS_IMAGE}}"
-  command = "polkadot-parachain"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl
deleted file mode 100644
index 0d01af82833e36afd3c38b2e00e9d604ace46797..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: Test we are producing blocks at 6 seconds clip
-Network: ./0011-async-backing-6-seconds-rate.toml
-Creds: config
-
-# Check authority status.
-alice: reports node_roles is 4
-
-# Ensure parachains are registered.
-alice: parachain 2000 is registered within 60 seconds
-alice: parachain 2001 is registered within 60 seconds
-
-# Ensure parachains made progress.
-alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# This parachains should produce blocks at 6s clip, let's assume an 8s rate, allowing for
-# some slots to be missed on slower machines
-alice: parachain 2000 block height is at least 30 within 240 seconds
-# This should already have produced the needed blocks
-alice: parachain 2001 block height is at least 30 within 6 seconds
-
diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
index 881abab64fd07b8495189982119b3f985332b8a7..874b8a09bb2489e0b00a677da3fd3aaea1a08beb 100644
--- a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
+++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
@@ -42,7 +42,8 @@ chain = "glutton-westend-local-{{id}}"
 
     [parachains.collator]
     name = "collator"
-    image = "{{CUMULUS_IMAGE}}"
+    # Use an old image that does not send out v2 receipts, as the old validators will still check the collator signatures.
+    image = "docker.io/paritypr/polkadot-parachain-debug:master-bde0bbe5"
     args = ["-lparachain=debug"]
 
 {% endfor %}
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml
deleted file mode 100644
index 2550054c8dadaf75b34b4c4a50f402576f3f5266..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/functional/0017-sync-backing.toml
+++ /dev/null
@@ -1,48 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 0
-  allowed_ancestry_len = 0
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  lookahead = 2
-  group_rotation_frequency = 4
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.node_groups]]
-  name = "alice"
-  args = [ "-lparachain=debug" ]
-  count = 10
-
-[[parachains]]
-id = 2000
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "adder-collator"
-  args = ["-lparachain=debug"]
-
-[[parachains]]
-id = 2001
-cumulus_based = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{CUMULUS_IMAGE}}"
-  command = "polkadot-parachain"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
deleted file mode 100644
index a53de784b2d1349fa890ce51984b6865c2c94fd5..0000000000000000000000000000000000000000
--- a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
+++ /dev/null
@@ -1,22 +0,0 @@
-Description: Test we are producing 12-second parachain blocks if sync backing is configured
-Network: ./0017-sync-backing.toml
-Creds: config
-
-# Check authority status.
-alice: reports node_roles is 4
-
-# Ensure parachains are registered.
-alice: parachain 2000 is registered within 60 seconds
-alice: parachain 2001 is registered within 60 seconds
-
-# Ensure parachains made progress.
-alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# This parachains should produce blocks at 12s clip, let's assume an 14s rate, allowing for
-# some slots to be missed on slower machines
-alice: parachain 2000 block height is at least 21 within 300 seconds
-alice: parachain 2000 block height is lower than 25 within 2 seconds
-
-# This should already have produced the needed blocks
-alice: parachain 2001 block height is at least 21 within 10 seconds
-alice: parachain 2001 block height is lower than 25 within 2 seconds
diff --git a/prdoc/pr_4529.prdoc b/prdoc/pr_4529.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..32beea17ad6b667893564c9105f3a46b3c06e061
--- /dev/null
+++ b/prdoc/pr_4529.prdoc
@@ -0,0 +1,22 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from pallet-grandpa
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed the `pallet::getter`s from `pallet-grandpa`.
+      The syntax `StorageItem::<T, I>::get()` should be used instead.
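+
+      A minimal illustration (sketch; `CurrentSetId` is used here as an example of an affected storage item, and the old getter name is assumed):
+
+      ```rust
+      // Before: via the removed getter.
+      // let set_id = pallet_grandpa::Pallet::<Runtime>::current_set_id();
+      // After: direct storage access.
+      let set_id = pallet_grandpa::CurrentSetId::<Runtime>::get();
+      ```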
+
+crates:
+  - name: pallet-grandpa
+    bump: minor
+  - name: kitchensink-runtime
+    bump: none
+  - name: westend-runtime
+    bump: none
+  - name: polkadot-test-runtime
+    bump: none
+  - name: rococo-runtime
+    bump: none
diff --git a/prdoc/pr_6140.prdoc b/prdoc/pr_6140.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..7e2bd3802cd7cc8b25abfefb1d98d4528c5c0b06
--- /dev/null
+++ b/prdoc/pr_6140.prdoc
@@ -0,0 +1,95 @@
+title: Accurate weight reclaim with frame_system::WeightReclaim and cumulus `StorageWeightReclaim` transaction extensions
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Since the introduction of transaction extensions, the transaction extension weight is no longer part of the base extrinsic weight. As a consequence, some transaction extension weight is missed when calculating the post-dispatch weight and reclaiming unused block weight.
+
+      For solo chains, in order to reclaim the weight accurately, the `frame_system::WeightReclaim` transaction extension must be used at the end of the transaction extension pipeline.
+
+      For parachains, `StorageWeightReclaim` in `cumulus-primitives-storage-weight-reclaim` is deprecated.
+      A new transaction extension, `StorageWeightReclaim` in `cumulus-pallet-weight-reclaim`, is introduced.
+      `StorageWeightReclaim` is meant to wrap the whole transaction extension pipeline and accurately accounts for all proof size.
+
+      The new wrapping transaction extension is used like this:
+      ```rust
+      /// The TransactionExtension to the basic transaction logic.
+      pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+             Runtime,
+             (
+                     frame_system::CheckNonZeroSender<Runtime>,
+                     frame_system::CheckSpecVersion<Runtime>,
+                     frame_system::CheckTxVersion<Runtime>,
+                     frame_system::CheckGenesis<Runtime>,
+                     frame_system::CheckEra<Runtime>,
+                     frame_system::CheckNonce<Runtime>,
+                     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+                     BridgeRejectObsoleteHeadersAndMessages,
+                     (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
+                     frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+                     frame_system::CheckWeight<Runtime>,
+             ),
+      >;
+      ```
+
+      NOTE: prior to transaction extensions, `StorageWeightReclaim` also missed some of the proof size used by other transaction extensions executed before it. This is also fixed by the wrapping `StorageWeightReclaim`.
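+
+      For solo chains, a minimal sketch of a transaction extension pipeline with `frame_system::WeightReclaim` at the end (assuming the usual `frame_system` extensions) looks like:
+      ```rust
+      /// The TransactionExtension to the basic transaction logic, with weight reclaim at the end.
+      pub type TxExtension = (
+             frame_system::CheckNonZeroSender<Runtime>,
+             frame_system::CheckSpecVersion<Runtime>,
+             frame_system::CheckTxVersion<Runtime>,
+             frame_system::CheckGenesis<Runtime>,
+             frame_system::CheckEra<Runtime>,
+             frame_system::CheckNonce<Runtime>,
+             pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+             frame_system::CheckWeight<Runtime>,
+             frame_system::WeightReclaim<Runtime>,
+      );
+      ```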
+
+crates:
+- name: cumulus-primitives-storage-weight-reclaim
+  bump: minor
+- name: sp-runtime
+  bump: patch
+- name: polkadot-sdk
+  bump: minor
+- name: asset-hub-rococo-runtime
+  bump: major
+- name: asset-hub-westend-runtime
+  bump: major
+- name: bridge-hub-rococo-runtime
+  bump: major
+- name: bridge-hub-westend-runtime
+  bump: major
+- name: collectives-westend-runtime
+  bump: major
+- name: coretime-rococo-runtime
+  bump: major
+- name: coretime-westend-runtime
+  bump: major
+- name: people-rococo-runtime
+  bump: major
+- name: people-westend-runtime
+  bump: major
+- name: contracts-rococo-runtime
+  bump: major
+- name: frame-support
+  bump: minor
+- name: frame-executive
+  bump: patch
+- name: frame-system
+  bump: major
+- name: staging-xcm-builder
+  bump: patch
+- name: xcm-runtime-apis
+  bump: patch
+- name: cumulus-pallet-weight-reclaim
+  bump: major
+- name: polkadot-service
+  bump: major
+- name: westend-runtime
+  bump: major
+- name: frame-metadata-hash-extension
+  bump: patch
+- name: frame-system-benchmarking
+  bump: major
+- name: polkadot-sdk-frame
+  bump: major
+- name: rococo-runtime
+  bump: major
+- name: cumulus-pov-validator
+  bump: patch
+- name: penpal-runtime
+  bump: major
+- name: glutton-westend-runtime
+  bump: major
+- name: rococo-parachain-runtime
+  bump: major
diff --git a/prdoc/pr_6368.prdoc b/prdoc/pr_6368.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..4fd3963eb05ecf032130ad9d4a6607b87d043f10
--- /dev/null
+++ b/prdoc/pr_6368.prdoc
@@ -0,0 +1,7 @@
+title: Migrate inclusion benchmark to v2
+doc:
+- audience: Runtime Dev
+  description: Migrate inclusion benchmark to v2.
+crates:
+- name: polkadot-runtime-parachains
+  bump: patch
diff --git a/prdoc/pr_6446.prdoc b/prdoc/pr_6446.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..3bfe7d0c7a60b7132caf5555baf8f2c018539e6d
--- /dev/null
+++ b/prdoc/pr_6446.prdoc
@@ -0,0 +1,16 @@
+title: Make pallet-recovery support `BlockNumberProvider`
+doc:
+- audience: Runtime Dev
+  description: |-
+      pallet-recovery now allows configuring the block number provider used within this pallet. The provided block number is used for the delay in the recovery process.
+
+      A new associated type has been introduced in the `Config` trait: `BlockNumberProvider`. This can be assigned to `System` to maintain the previous behavior, or it can be set to another block number provider, such as `RelayChain`.
+
+      If the block provider is configured with a value different from `System`, a migration will be necessary for the `Recoverable` and `ActiveRecoveries` storage items.
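+
+      To keep the previous behavior, the new associated type can point at the local system pallet (sketch; other associated types elided):
+
+      ```rust
+      impl pallet_recovery::Config for Runtime {
+          // ... other associated types unchanged ...
+          type BlockNumberProvider = System;
+      }
+      ```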
+crates:
+- name: rococo-runtime
+  bump: major
+- name: westend-runtime
+  bump: major
+- name: pallet-recovery
+  bump: major
diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..2cbb49cd7dd2457ab256a1325ca88c4f0830e57e
--- /dev/null
+++ b/prdoc/pr_6689.prdoc
@@ -0,0 +1,19 @@
+title: '[pallet-revive] Update gas encoding'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction.
+    Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic.
+
+    This approach unfortunately can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters as they are not part of the signed payload.
+
+    The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each component on 2 digits.
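+
+    As a rough illustration of the idea (the function name and exact digit layout below are assumptions, not the pallet API):
+
+    ```rust
+    /// Pack ceil(log2) of each extra parameter into two decimal digits appended to `gas`.
+    fn encode_extras(gas: u128, ref_time: u128, pov: u128, deposit: u128) -> u128 {
+        // ceil(log2(v)), capped so it always fits in two decimal digits.
+        let log2 = |v: u128| -> u128 {
+            if v <= 1 { 0 } else { ((128 - (v - 1).leading_zeros()) as u128).min(99) }
+        };
+        gas * 1_000_000 + log2(ref_time) * 10_000 + log2(pov) * 100 + log2(deposit)
+    }
+    ```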
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
+- name: pallet-revive
+  bump: minor
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: pallet-revive-mock-network
+  bump: minor
diff --git a/prdoc/pr_6820.prdoc b/prdoc/pr_6820.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..85249a33341dcec7fb08a63b281ce701db40a479
--- /dev/null
+++ b/prdoc/pr_6820.prdoc
@@ -0,0 +1,8 @@
+title: Add XCM benchmarks to collectives-westend
+doc:
+- audience: Runtime Dev
+  description: Collectives-westend was using `FixedWeightBounds`, meaning the same
+    weight for every instruction. This change adds proper benchmarks.
+crates:
+- name: collectives-westend-runtime
+  bump: patch
diff --git a/prdoc/pr_6855.prdoc b/prdoc/pr_6855.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a665115ce6c72ab2e147538018bbc33ee14820e9
--- /dev/null
+++ b/prdoc/pr_6855.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Snowbridge - Support bridging native ETH
+
+doc:
+  - audience: Runtime User
+    description: 
+      Support native ETH as an asset type instead of only supporting WETH. WETH is still supported, but this change
+      adds support for ETH in the inbound and outbound routers.
+
+crates:
+  - name: snowbridge-router-primitives
+    bump: minor
+  - name: snowbridge-pallet-inbound-queue-fixtures
+    bump: minor
diff --git a/prdoc/pr_6979.prdoc b/prdoc/pr_6979.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..fae7feeec2df7c33c5845b405d52d180086b4820
--- /dev/null
+++ b/prdoc/pr_6979.prdoc
@@ -0,0 +1,8 @@
+title: Update prometheus binding failure logging format
+doc:
+- audience: Node Dev
+  description: |-
+    Using `{:#?}` for the error details is a bit annoying; this change introduces a more consistent formatting style for error messages.
+crates:
+- name: substrate-prometheus-endpoint
+  bump: patch
diff --git a/prdoc/pr_6981.prdoc b/prdoc/pr_6981.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..8ed70e51ef454fafb84e1231b436e25f1fdd511b
--- /dev/null
+++ b/prdoc/pr_6981.prdoc
@@ -0,0 +1,7 @@
+title: '[pallet-revive] fix file case'
+doc:
+- audience: Runtime Dev
+  description: "fix https://github.com/paritytech/polkadot-sdk/issues/6970"
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/prdoc/pr_6986.prdoc b/prdoc/pr_6986.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..8deb6b04bd1cdd43fc37feab352aed4b56d16e9a
--- /dev/null
+++ b/prdoc/pr_6986.prdoc
@@ -0,0 +1,18 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-mixnet] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-mixnet to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-mixnet
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
+  - name: polkadot-sdk
+    bump: none
\ No newline at end of file
diff --git a/prdoc/pr_6988.prdoc b/prdoc/pr_6988.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..18f70f9fd97f1f316bec59a8072e89a8acec1c8b
--- /dev/null
+++ b/prdoc/pr_6988.prdoc
@@ -0,0 +1,5 @@
+doc: []
+
+crates:
+  - name: polkadot
+    bump: none
\ No newline at end of file
diff --git a/prdoc/pr_6989.prdoc b/prdoc/pr_6989.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..86c56698d41eec2403f50ec8bddaff689ee84318
--- /dev/null
+++ b/prdoc/pr_6989.prdoc
@@ -0,0 +1,10 @@
+title: 'paras-registrar: Improve error reporting'
+doc:
+- audience: Runtime User
+  description: |-
+    This PR improves the error reporting by the paras registrar when an owner wants to access a locked parachain.
+
+    Closes: https://github.com/paritytech/polkadot-sdk/issues/6745
+crates:
+- name: polkadot-runtime-common
+  bump: patch
diff --git a/prdoc/pr_7005.prdoc b/prdoc/pr_7005.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a61f7c5b9b714ab2f3ca56024f764fe8e04c4c9e
--- /dev/null
+++ b/prdoc/pr_7005.prdoc
@@ -0,0 +1,7 @@
+title: Log peerset set ID -> protocol name mapping
+doc:
+- audience: Node Dev
+  description: To simplify debugging of peerset-related issues like https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.
+crates:
+- name: sc-network
+  bump: patch
diff --git a/prdoc/pr_7011.prdoc b/prdoc/pr_7011.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..55fe0c73ca091365fedec3dfe43a6c7988d9679f
--- /dev/null
+++ b/prdoc/pr_7011.prdoc
@@ -0,0 +1,16 @@
+title: 'sync: Send already connected peers to new subscribers'
+doc:
+- audience: Node Dev
+  description: |-
+    Introduce the `SyncEvent::InitialPeers` message, sent to new subscribers so they can correctly track sync peers. This resolves a race condition described in https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.
+
+    Fixes https://github.com/paritytech/polkadot-sdk/issues/6573.
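+
+    A sketch of how a subscriber can handle the new event, mirroring the handlers updated in this
+    PR (`add_reserved` / `remove_reserved` are illustrative stand-ins for the real reserved-set
+    calls):
+
+    ```rust
+    match event {
+        // Sent once, right after subscribing: seed the local peer view with all known peers.
+        SyncEvent::InitialPeers(peer_ids) => add_reserved(peer_ids),
+        SyncEvent::PeerConnected(peer_id) => add_reserved(vec![peer_id]),
+        SyncEvent::PeerDisconnected(peer_id) => remove_reserved(peer_id),
+    }
+    ```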
+crates:
+- name: sc-network-gossip
+  bump: major
+- name: sc-network-statement
+  bump: patch
+- name: sc-network-sync
+  bump: major
+- name: sc-network-transactions
+  bump: patch
diff --git a/prdoc/pr_7013.prdoc b/prdoc/pr_7013.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..138fa7f2310221b4023eca54ff646aaadd728c49
--- /dev/null
+++ b/prdoc/pr_7013.prdoc
@@ -0,0 +1,7 @@
+title: 'pallet-bounties: Fix benchmarks for 0 ED'
+doc:
+- audience: Runtime Dev
+  description: 'Closes: https://github.com/paritytech/polkadot-sdk/issues/7009'
+crates:
+- name: pallet-bounties
+  bump: patch
diff --git a/prdoc/pr_7020.prdoc b/prdoc/pr_7020.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5bbdb44c45a0f71050223f8f018d8475650f8ab2
--- /dev/null
+++ b/prdoc/pr_7020.prdoc
@@ -0,0 +1,18 @@
+title: Remove warning log from frame-omni-bencher CLI
+doc:
+- audience: Node Operator
+  description: |-
+    # Description
+
+    This PR removes the outdated warning message from the `frame-omni-bencher` CLI that states the tool is "not yet battle tested". Fixes #7019
+
+    ## Integration
+
+    No integration steps are required.
+
+    ## Review Notes
+
+    The functionality of the tool remains unchanged. Removes the warning message from the CLI output.
+crates:
+- name: frame-omni-bencher
+  bump: patch
diff --git a/prdoc/pr_7021.prdoc b/prdoc/pr_7021.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5443579bbd9295b6cfc507cd000da0a80951ef5b
--- /dev/null
+++ b/prdoc/pr_7021.prdoc
@@ -0,0 +1,8 @@
+title: Improve remote externalities logging
+doc:
+- audience: Node Dev
+  description: |-
+    Automatically detect whether the current environment is a TTY. If not, disable the spinner logging.
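+
+    One way to do such detection, as a sketch (not necessarily the exact code used):
+
+    ```rust
+    use std::io::{stderr, IsTerminal};
+
+    fn spinner_enabled() -> bool {
+        // Only animate the spinner when stderr is an interactive terminal.
+        stderr().is_terminal()
+    }
+    ```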
+crates:
+- name: frame-remote-externalities
+  bump: patch
diff --git a/prdoc/pr_7028.prdoc b/prdoc/pr_7028.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ead918fc2e0077ef4972075705e1e0cc10447cc4
--- /dev/null
+++ b/prdoc/pr_7028.prdoc
@@ -0,0 +1,25 @@
+title: 'Fix implication order in implementation of `TransactionExtension` for tuple'
+doc:
+- audience:
+  - Runtime Dev
+  - Runtime User
+  description: |-
+    Before this PR, the implications were different in the pipeline `(A, B, C)` and `((A, B), C)`.
+    This PR fixes this behavior and makes nested tuples transparent: the implication order of
+    nested tuples is now the same as in a single flat tuple.
+
+    For runtime users this means that the implication can be breaking, depending on the pipeline
+    used in the runtime.
+
+    For runtime developers this breaks usage of `TransactionExtension::validate`.
+    When calling `TransactionExtension::validate` the implication must now implement the
+    `Implication` trait; you can use `TxBaseImplication` to wrap the type and use it as the base
+    implication. E.g. instead of `&(extension_version, call)` you can write
+    `&TxBaseImplication((extension_version, call))`.
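+
+    Illustrative only, with a toy stand-in for `TxBaseImplication` (not the real sp-runtime type)
+    to show the new shape at the call site:
+
+    ```rust
+    pub struct TxBaseImplication<T>(pub T);
+
+    fn validate_sketch(implication: &TxBaseImplication<(u8, &str)>) {
+        // The wrapped tuple plays the role of the base implication.
+        let TxBaseImplication((extension_version, call)) = implication;
+        let _ = (extension_version, call);
+    }
+
+    fn main() {
+        // Before: `&(extension_version, call)` was passed directly; now it is wrapped.
+        validate_sketch(&TxBaseImplication((0u8, "call")));
+    }
+    ```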
+
+crates:
+- name: sp-runtime
+  bump: major
+- name: pallet-skip-feeless-payment
+  bump: major
+- name: frame-system
+  bump: major
diff --git a/prdoc/pr_7030.prdoc b/prdoc/pr_7030.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..3b1f7be558d8f7d484c92d5e74446b39c37c0262
--- /dev/null
+++ b/prdoc/pr_7030.prdoc
@@ -0,0 +1,24 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "[core-fellowship] Add permissionless import_member"
+
+doc:
+  - audience: [Runtime Dev, Runtime User]
+    description: |
+      Changes:
+      - Add call `import_member` to the core-fellowship pallet.
+      - Move common logic between `import` and `import_member` into `do_import`.
+
+      This is a minor change, so as not to impact UI and downstream integration.
+
+      ## `import_member`
+
+      Can be used to induct an arbitrary collective member and is callable by any signed origin. Pays no fees upon success.  
+      This is useful in the case that members did not induct themselves and are idling on their rank.
+
+crates:
+- name: pallet-core-fellowship
+  bump: major
+- name: collectives-westend-runtime
+  bump: patch
diff --git a/prdoc/pr_7040.prdoc b/prdoc/pr_7040.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..f88e96a703712b42894806b398deaa7856bbdb4f
--- /dev/null
+++ b/prdoc/pr_7040.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-node-authorization] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-node-authorization to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-node-authorization
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
diff --git a/prdoc/pr_7043.prdoc b/prdoc/pr_7043.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d7f6cd6907c8588af8d500037d31d76b5ce59877
--- /dev/null
+++ b/prdoc/pr_7043.prdoc
@@ -0,0 +1,51 @@
+title: Remove usage of `sp-std` from Substrate
+doc:
+- audience: Runtime Dev
+  description: |-
+    # Description
+
+    This PR removes usage of the deprecated `sp-std` from Substrate (follow-up to #5010).
+
+    ## Integration
+
+    This PR doesn't remove re-exported `sp_std` from any crates yet, so downstream projects using re-exported `sp_std` will not be affected.
+
+    ## Review Notes
+
+    The existing code using `sp-std` is refactored to use `alloc` and `core` directly. The key-value maps are instantiated from an array of tuples directly instead of using `sp_std::map!` macro.
+
+    This PR replaces `sp_std::Writer`, a helper type for using `Vec<u8>` with `core::fmt::Write` trait, with `alloc::string::String`.
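+
+    A small sketch of both replacements (`BTreeMap` is used here purely as an illustrative
+    container):
+
+    ```rust
+    extern crate alloc;
+    use alloc::{collections::BTreeMap, string::String};
+    use core::fmt::Write;
+
+    fn sketch() {
+        // Key-value map built from an array of tuples instead of `sp_std::map!`.
+        let _weights: BTreeMap<&str, u32> = [("read", 25), ("write", 100)].into_iter().collect();
+
+        // `String` implements `core::fmt::Write`, replacing the `sp_std::Writer` helper.
+        let mut out = String::new();
+        let _ = write!(out, "block #{}", 42u32);
+    }
+    ```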
+
+crates:
+- name: pallet-contracts
+  bump: patch
+- name: pallet-revive
+  bump: patch
+- name: sp-runtime
+  bump: patch
+- name: frame-support-procedural
+  bump: patch
+- name: frame-system
+  bump: patch
+- name: pallet-contracts-proc-macro
+  bump: patch
+- name: pallet-revive-proc-macro
+  bump: patch
+- name: frame-support
+  bump: patch
+- name: sc-sysinfo
+  bump: patch
+- name: pallet-bags-list-remote-tests
+  bump: patch
+- name: pallet-election-provider-e2e-test
+  bump: patch
+- name: pallet-nft-fractionalization
+  bump: patch
+- name: pallet-nomination-pools-test-delegate-stake
+  bump: patch
+- name: pallet-nomination-pools-test-transfer-stake
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
+- name: pallet-uniques
+  bump: patch
diff --git a/prdoc/pr_7046.prdoc b/prdoc/pr_7046.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..113cc9c7aac5cc0d1b36cb2b8f0e3105dcc622b9
--- /dev/null
+++ b/prdoc/pr_7046.prdoc
@@ -0,0 +1,7 @@
+title: Add warning when using default `SubstrateWeight` in production
+doc:
+- audience: Runtime Dev
+  description: |-
+    PR for #3581
+    Added a cfg to show a deprecation warning message when using std.
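+
+    A minimal sketch of the pattern, assuming the warning is attached to the default weight struct
+    and gated on the `std` feature (names illustrative, not the exact implementation):
+
+    ```rust
+    #[cfg_attr(feature = "std", deprecated(
+        note = "Default SubstrateWeight values are not meant for production; generate weights for your own runtime."
+    ))]
+    pub struct SubstrateWeight<T>(core::marker::PhantomData<T>);
+    ```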
+crates: []
diff --git a/prdoc/pr_7048.prdoc b/prdoc/pr_7048.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0f3856bc12876aeeae32350539e3fc3f350fdf45
--- /dev/null
+++ b/prdoc/pr_7048.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-salary] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: >
+      This PR migrates the `pallet-salary` to use the FRAME umbrella crate.  
+      This is part of the ongoing effort to migrate all pallets to use the FRAME umbrella crate.  
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-salary
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
diff --git a/prdoc/pr_7050.prdoc b/prdoc/pr_7050.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..da9dd808033d02b8e51d1beac4f0cf218b926c87
--- /dev/null
+++ b/prdoc/pr_7050.prdoc
@@ -0,0 +1,11 @@
+title: Avoid incomplete block import pipeline with full verifying import queue
+doc:
+- audience: Node Dev
+  description: |-
+    When warp syncing a node using the equivocation checking verifier, we now properly set the fork_choice rule.
+    Affected are mostly nodes that are derived from the parachain template. Omni-node is not affected.
+
+    This prevents the error `ClientImport("Incomplete block import pipeline.")` after state sync.
+crates:
+- name: cumulus-client-consensus-aura
+  bump: patch
diff --git a/prdoc/pr_7069.prdoc b/prdoc/pr_7069.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a0fc5cafb020b4f23c1a1a61a21b9e9ce33c8478
--- /dev/null
+++ b/prdoc/pr_7069.prdoc
@@ -0,0 +1,10 @@
+title: Fix defensive! macro to be used in umbrella crates
+doc:
+- audience: Runtime Dev
+  description: |-
+    PR for #7054
+
+    Replaced `frame_support` with `$crate`, following @gui1117's suggestion, to fix the dependency issue.
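+
+    A generic sketch of the pattern (not the real `defensive!` body): referring to the defining
+    crate via `$crate` keeps the macro usable when it is re-exported through an umbrella crate.
+
+    ```rust
+    #[macro_export]
+    macro_rules! defensive_sketch {
+        ($msg:expr) => {
+            // `$crate` always resolves to the crate that defines the macro,
+            // no matter how the caller imported it.
+            $crate::log_defensive($msg)
+        };
+    }
+
+    pub fn log_defensive(msg: &str) {
+        debug_assert!(false, "defensive failure: {msg}");
+    }
+    ```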
+crates:
+- name: frame-support
+  bump: patch
diff --git a/prdoc/pr_7073.prdoc b/prdoc/pr_7073.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..3bcd129d03172cf7e52e4b59ad505bf36ca01043
--- /dev/null
+++ b/prdoc/pr_7073.prdoc
@@ -0,0 +1,16 @@
+title: Implement NetworkRequest for litep2p
+doc:
+- audience: Node Dev
+  description: |-
+    # Description
+
+    Implements `NetworkRequest::request` for litep2p, which we need for networking benchmarks.
+
+
+    ## Review Notes
+
+    Duplicates the implementation for `NetworkService`:
+    https://github.com/paritytech/polkadot-sdk/blob/5bf9dd2aa9bf944434203128783925bdc2ad8c01/substrate/client/network/src/service.rs#L1186-L1205
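+
+    Usage is the same as with the libp2p backend; a fragment sketch based on the benchmark added
+    in this PR (peer id and protocol name are illustrative):
+
+    ```rust
+    let (response, _protocol) = network_service
+        .request(
+            peer_id.into(),
+            "/request-response/1".into(),
+            vec![0; 2],
+            None,
+            IfDisconnected::TryConnect,
+        )
+        .await?;
+    ```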
+crates:
+- name: sc-network
+  bump: patch
diff --git a/prdoc/pr_7074.prdoc b/prdoc/pr_7074.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d49e5f8d831fe9cca6096f85c9c6a33b0279301c
--- /dev/null
+++ b/prdoc/pr_7074.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Unset SKIP_WASM_BUILD=1 for the aarch64 binaries release
+
+doc:
+  - audience: [ Node Dev, Runtime Dev]
+    description:
+      Fix the release pipeline environment by unsetting SKIP_WASM_BUILD=1
+      so that aarch64 binaries are built with their runtimes included.
+
+crates: [ ]
diff --git a/prdoc/pr_7102.prdoc b/prdoc/pr_7102.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b1923aafc3db4a755b4e79f8961a9eda52d09038
--- /dev/null
+++ b/prdoc/pr_7102.prdoc
@@ -0,0 +1,8 @@
+title: '`fatxpool`: rotator cache size now depends on pool''s limits'
+doc:
+- audience: Node Dev
+  description: |-
+    This PR modifies the hard-coded size of the extrinsics cache within `PoolRotator` to be in line with the pool limits. It only applies to the fork-aware transaction pool; for the legacy (single-state) transaction pool the logic remains untouched.
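+
+    Sketch of the new construction (a fragment based on the changes in this PR):
+
+    ```rust
+    // The rotator's expected cache size now follows the pool's ready + future limits.
+    let total_count = options.ready.count + options.future.count;
+    let rotator = PoolRotator::new_with_expected_size(options.ban_time, total_count);
+    ```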
+crates:
+- name: sc-transaction-pool
+  bump: minor
diff --git a/prdoc/pr_7116.prdoc b/prdoc/pr_7116.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..95a5254778a4d3add09d9219f3d170beb4f9f8dd
--- /dev/null
+++ b/prdoc/pr_7116.prdoc
@@ -0,0 +1,8 @@
+title: Increase the number of pvf execution workers from 2 to 4
+doc:
+- audience: Node Dev
+  description: |-
+    Increase the number of pvf execution workers from 2 to 4.
+crates:
+- name: polkadot-service
+  bump: patch
diff --git a/prdoc/pr_7126.prdoc b/prdoc/pr_7126.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1a86af1b2d1da8ade29829a01f83753637e915dd
--- /dev/null
+++ b/prdoc/pr_7126.prdoc
@@ -0,0 +1,7 @@
+title: 'xcm: Fixes for `UnpaidLocalExporter`'
+doc:
+- audience: Runtime Dev
+  description: This PR deprecates `UnpaidLocalExporter` in favor of the new `LocalExporter`. First, the name is misleading, as it can be used in both paid and unpaid scenarios. Second, it contains a hard-coded channel 0, whereas `LocalExporter` uses the same algorithm as `xcm-exporter`.
+crates:
+- name: staging-xcm-builder
+  bump: minor
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 5f6806c235f6f9c1753df533c9c111bf6ca85f44..e531097dbb5e8adf16d7e8f7de60cba980fe27be 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -138,6 +138,7 @@ pub fn create_extrinsic(
 				>::from(tip, None),
 			),
 			frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			frame_system::WeightReclaim::<kitchensink_runtime::Runtime>::new(),
 		);
 
 	let raw_payload = kitchensink_runtime::SignedPayload::from_raw(
@@ -153,6 +154,7 @@ pub fn create_extrinsic(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| sender.sign(e));
@@ -1060,6 +1062,7 @@ mod tests {
 				let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from(
 					pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None),
 				);
+				let weight_reclaim = frame_system::WeightReclaim::new();
 				let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false);
 				let tx_ext: TxExtension = (
 					check_non_zero_sender,
@@ -1071,6 +1074,7 @@ mod tests {
 					check_weight,
 					tx_payment,
 					metadata_hash,
+					weight_reclaim,
 				);
 				let raw_payload = SignedPayload::from_raw(
 					function,
@@ -1085,6 +1089,7 @@ mod tests {
 						(),
 						(),
 						None,
+						(),
 					),
 				);
 				let signature = raw_payload.using_encoded(|payload| signer.sign(payload));
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 4a031e3cad6d928462dc950ed68aa2cc6f6453ed..b8ef41130c0e18e7a7bcea155ce1929afe8f9169 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1475,6 +1475,7 @@ impl pallet_revive::Config for Runtime {
 	type Xcm = ();
 	type ChainId = ConstU64<420_420_420>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
+	type EthGasEncoder = ();
 }
 
 impl pallet_sudo::Config for Runtime {
@@ -1539,6 +1540,7 @@ where
 				),
 			),
 			frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		);
 
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -1660,6 +1662,7 @@ impl pallet_recovery::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type WeightInfo = pallet_recovery::weights::SubstrateWeight<Runtime>;
 	type RuntimeCall = RuntimeCall;
+	type BlockNumberProvider = System;
 	type Currency = Balances;
 	type ConfigDepositBase = ConfigDepositBase;
 	type FriendDepositFactor = FriendDepositFactor;
@@ -2733,6 +2736,7 @@ pub type TxExtension = (
 		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
 	>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -2754,6 +2758,7 @@ impl EthExtra for EthExtraImpl {
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None)
 				.into(),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 	}
 }
@@ -3035,7 +3040,7 @@ impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> sp_consensus_grandpa::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs
index e5b0299f01a83b2756e50aae44fc781aa35c6ee3..08d6ad6dcc35d95740f2ac684ed5c6265d023c6a 100644
--- a/substrate/bin/node/testing/src/keyring.rs
+++ b/substrate/bin/node/testing/src/keyring.rs
@@ -86,6 +86,7 @@ pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension {
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None),
 		),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
+		frame_system::WeightReclaim::new(),
 	)
 }
 
diff --git a/substrate/client/allocator/src/freeing_bump.rs b/substrate/client/allocator/src/freeing_bump.rs
index 144c0764540db3f14f6f9bea5b113506f92879d9..405916adc3c3ff1bba878b7513fb1125c2f3462a 100644
--- a/substrate/client/allocator/src/freeing_bump.rs
+++ b/substrate/client/allocator/src/freeing_bump.rs
@@ -182,7 +182,7 @@ const NIL_MARKER: u32 = u32::MAX;
 enum Link {
 	/// Nil, denotes that there is no next element.
 	Nil,
-	/// Link to the next element represented as a pointer to the a header.
+	/// Link to the next element represented as a pointer to the header.
 	Ptr(u32),
 }
 
diff --git a/substrate/client/api/src/proof_provider.rs b/substrate/client/api/src/proof_provider.rs
index 7f60f856ae8095b9d8f9e445f479cbb549473074..9043d3482723911c90263c80045af36facea8db0 100644
--- a/substrate/client/api/src/proof_provider.rs
+++ b/substrate/client/api/src/proof_provider.rs
@@ -82,7 +82,7 @@ pub trait ProofProvider<Block: BlockT> {
 	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>>;
 
 	/// Verify read storage proof for a set of keys.
-	/// Returns collected key-value pairs and a the nested state
+	/// Returns collected key-value pairs and the nested state
 	/// depth of current iteration or 0 if completed.
 	fn verify_range_proof(
 		&self,
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 2daf1e49ee4b49c0e45f3efd0baeef1488b319c6..bff258a9a011bbf9cb6b1bcee319044f54b830de 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -254,10 +254,12 @@ impl<B: BlockT> Future for GossipEngine<B> {
 
 					match sync_event_stream {
 						Poll::Ready(Some(event)) => match event {
-							SyncEvent::PeerConnected(remote) =>
-								this.network.add_set_reserved(remote, this.protocol.clone()),
-							SyncEvent::PeerDisconnected(remote) =>
-								this.network.remove_set_reserved(remote, this.protocol.clone()),
+							SyncEvent::InitialPeers(peer_ids) =>
+								this.network.add_set_reserved(peer_ids, this.protocol.clone()),
+							SyncEvent::PeerConnected(peer_id) =>
+								this.network.add_set_reserved(vec![peer_id], this.protocol.clone()),
+							SyncEvent::PeerDisconnected(peer_id) =>
+								this.network.remove_set_reserved(peer_id, this.protocol.clone()),
 						},
 						// The sync event stream closed. Do the same for [`GossipValidator`].
 						Poll::Ready(None) => {
diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs
index 20d9922200c2c3c6cf394692099a21c01b540bd0..2ec573bf9e3ef8056f6fc6309110aa26d7e7e44b 100644
--- a/substrate/client/network-gossip/src/lib.rs
+++ b/substrate/client/network-gossip/src/lib.rs
@@ -82,15 +82,18 @@ mod validator;
 
 /// Abstraction over a network.
 pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream {
-	fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
-		let addr = Multiaddr::empty().with(Protocol::P2p(*who.as_ref()));
-		let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect());
+	fn add_set_reserved(&self, peer_ids: Vec<PeerId>, protocol: ProtocolName) {
+		let addrs = peer_ids
+			.into_iter()
+			.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+			.collect();
+		let result = self.add_peers_to_reserved_set(protocol, addrs);
 		if let Err(err) = result {
 			log::error!(target: "gossip", "add_set_reserved failed: {}", err);
 		}
 	}
-	fn remove_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
-		let result = self.remove_peers_from_reserved_set(protocol, iter::once(who).collect());
+	fn remove_set_reserved(&self, peer_id: PeerId, protocol: ProtocolName) {
+		let result = self.remove_peers_from_reserved_set(protocol, iter::once(peer_id).collect());
 		if let Err(err) = result {
 			log::error!(target: "gossip", "remove_set_reserved failed: {}", err);
 		}
diff --git a/substrate/client/network/README.md b/substrate/client/network/README.md
index f4031fbd308539c6235f3bc2697dfa13136f039d..4336bb78533ce5676e57f9471a952d1179bb1b65 100644
--- a/substrate/client/network/README.md
+++ b/substrate/client/network/README.md
@@ -245,7 +245,7 @@ only downloads finalized authority set changes.
 GRANDPA keeps justifications for each finalized authority set change. Each change is signed by the
 authorities from the previous set. By downloading and verifying these signed hand-offs starting from genesis,
 we arrive at a recent header faster than downloading full header chain. Each `WarpSyncRequest` contains a block
-hash to a to start collecting proofs from. `WarpSyncResponse` contains a sequence of block headers and
+hash to start collecting proofs from. `WarpSyncResponse` contains a sequence of block headers and
 justifications. The proof downloader checks the justifications and continues requesting proofs from the last
 header hash, until it arrives at some recent header.
 
@@ -261,7 +261,7 @@ data. I.e. it is unable to serve bock bodies and headers other than the most rec
 nodes have block history available, a background sync process is started that downloads all the missing blocks.
 It is run in parallel with the keep-up sync and does not interfere with downloading of the recent blocks.
 During this download we also import GRANDPA justifications for blocks with authority set changes, so that
-the warp-synced node has all the data to serve for other nodes nodes that might want to sync from it with
+the warp-synced node has all the data to serve for other nodes that might want to sync from it with
 any method.
 
 # Usage
diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs
index 40a810d616b599ab8a0206a156ead6e09f1e2edc..a406e328d5a64a678083c8df33df8e47409ec82b 100644
--- a/substrate/client/network/benches/notifications_protocol.rs
+++ b/substrate/client/network/benches/notifications_protocol.rs
@@ -36,19 +36,16 @@ use std::{sync::Arc, time::Duration};
 use substrate_test_runtime_client::runtime;
 use tokio::{sync::Mutex, task::JoinHandle};
 
-const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of notifications, label)
-	(6, 100, "64B"),
-	(9, 100, "512B"),
-	(12, 100, "4KB"),
-	(15, 100, "64KB"),
-];
-const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of notifications, label)
-	(18, 10, "256KB"),
-	(21, 10, "2MB"),
-	(24, 10, "16MB"),
-	(27, 10, "128MB"),
+const NUMBER_OF_NOTIFICATIONS: usize = 100;
+const PAYLOAD: &[(u32, &'static str)] = &[
+	// (Exponent of size, label)
+	(6, "64B"),
+	(9, "512B"),
+	(12, "4KB"),
+	(15, "64KB"),
+	(18, "256KB"),
+	(21, "2MB"),
+	(24, "16MB"),
 ];
 const MAX_SIZE: u64 = 2u64.pow(30);
 
@@ -156,12 +153,19 @@ where
 				tokio::select! {
 					Some(event) = notification_service1.next_event() => {
 						if let NotificationEvent::NotificationStreamOpened { .. } = event {
-							break;
+							// Send a 32MB notification to preheat the network
+							notification_service1.send_async_notification(&peer_id2, vec![0; 2usize.pow(25)]).await.unwrap();
 						}
 					},
 					Some(event) = notification_service2.next_event() => {
-						if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event {
-							result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap();
+						match event {
+							NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+								result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap();
+							},
+							NotificationEvent::NotificationReceived { .. } => {
+								break;
+							}
+							_ => {}
 						}
 					},
 				}
@@ -255,64 +259,53 @@ async fn run_with_backpressure(setup: Arc<BenchSetup>, size: usize, limit: usize
 	let _ = tokio::join!(network1, network2);
 }
 
-fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) {
+fn run_benchmark(c: &mut Criterion) {
 	let rt = tokio::runtime::Runtime::new().unwrap();
 	let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
-	let mut group = c.benchmark_group(group);
+	let mut group = c.benchmark_group("notifications_protocol");
 	group.plot_config(plot_config);
+	group.sample_size(10);
 
 	let libp2p_setup = setup_workers::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("libp2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS));
+		});
 		group.bench_with_input(
 			BenchmarkId::new("libp2p/with_backpressure", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt)
-					.iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit));
+			&size,
+			|b, &size| {
+				b.to_async(&rt).iter(|| {
+					run_with_backpressure(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS)
+				});
 			},
 		);
 	}
 	drop(libp2p_setup);
 
 	let litep2p_setup = setup_workers::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("litep2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS));
+		});
 		group.bench_with_input(
 			BenchmarkId::new("litep2p/with_backpressure", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt)
-					.iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit));
+			&size,
+			|b, &size| {
+				b.to_async(&rt).iter(|| {
+					run_with_backpressure(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS)
+				});
 			},
 		);
 	}
 	drop(litep2p_setup);
 }
 
-fn run_benchmark_with_small_payload(c: &mut Criterion) {
-	run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload");
-}
-
-fn run_benchmark_with_large_payload(c: &mut Criterion) {
-	run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload");
-}
-
-criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload);
+criterion_group!(benches, run_benchmark);
 criterion_main!(benches);
diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs
index 85381112b7538f16a8033e0efe310aef1038bf2e..97c6d72ddf1ef78524810b6bcb5e2175e15a0d9f 100644
--- a/substrate/client/network/benches/request_response_protocol.rs
+++ b/substrate/client/network/benches/request_response_protocol.rs
@@ -37,19 +37,16 @@ use substrate_test_runtime_client::runtime;
 use tokio::{sync::Mutex, task::JoinHandle};
 
 const MAX_SIZE: u64 = 2u64.pow(30);
-const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of requests, label)
-	(6, 100, "64B"),
-	(9, 100, "512B"),
-	(12, 100, "4KB"),
-	(15, 100, "64KB"),
-];
-const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of requests, label)
-	(18, 10, "256KB"),
-	(21, 10, "2MB"),
-	(24, 10, "16MB"),
-	(27, 10, "128MB"),
+const NUMBER_OF_REQUESTS: usize = 100;
+const PAYLOAD: &[(u32, &'static str)] = &[
+	// (Exponent of size, label)
+	(6, "64B"),
+	(9, "512B"),
+	(12, "4KB"),
+	(15, "64KB"),
+	(18, "256KB"),
+	(21, "2MB"),
+	(24, "16MB"),
 ];
 
 pub fn create_network_worker<B, H, N>() -> (
@@ -154,6 +151,21 @@ where
 	let handle1 = tokio::spawn(worker1.run());
 	let handle2 = tokio::spawn(worker2.run());
 
+	let _ = tokio::spawn({
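+		// Answer the warm-up request (sent from the `ready` task below once the connection
+		// is established) with a large 32MB response to preheat the network before the
+		// benchmark runs.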
+		let rx2 = rx2.clone();
+
+		async move {
+			let req = rx2.recv().await.unwrap();
+			req.pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![0; 2usize.pow(25)]),
+					reputation_changes: vec![],
+					sent_feedback: None,
+				})
+				.unwrap();
+		}
+	});
+
 	let ready = tokio::spawn({
 		let network_service1 = Arc::clone(&network_service1);
 
@@ -165,6 +177,16 @@ where
 				network_service2.listen_addresses()[0].clone()
 			};
 			network_service1.add_known_address(peer_id2, listen_address2.into());
+			let _ = network_service1
+				.request(
+					peer_id2.into(),
+					"/request-response/1".into(),
+					vec![0; 2],
+					None,
+					IfDisconnected::TryConnect,
+				)
+				.await
+				.unwrap();
 		}
 	});
 
@@ -210,8 +232,8 @@ async fn run_serially(setup: Arc<BenchSetup>, size: usize, limit: usize) {
 		async move {
 			loop {
 				tokio::select! {
-					res = rx2.recv() => {
-						let IncomingRequest { pending_response, .. } = res.unwrap();
+					req = rx2.recv() => {
+						let IncomingRequest { pending_response, .. } = req.unwrap();
 						pending_response.send(OutgoingResponse {
 							result: Ok(vec![0; size]),
 							reputation_changes: vec![],
@@ -269,49 +291,35 @@ async fn run_with_backpressure(setup: Arc<BenchSetup>, size: usize, limit: usize
 	let _ = tokio::join!(network1, network2);
 }
 
-fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) {
+fn run_benchmark(c: &mut Criterion) {
 	let rt = tokio::runtime::Runtime::new().unwrap();
 	let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
-	let mut group = c.benchmark_group(group);
+	let mut group = c.benchmark_group("request_response_protocol");
 	group.plot_config(plot_config);
+	group.sample_size(10);
 
 	let libp2p_setup = setup_workers::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("libp2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_REQUESTS));
+		});
 	}
 	drop(libp2p_setup);
 
-	// TODO: NetworkRequest::request should be implemented for Litep2pNetworkService
 	let litep2p_setup = setup_workers::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>(&rt);
-	// for &(exponent, limit, label) in payload.iter() {
-	// 	let size = 2usize.pow(exponent);
-	// 	group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-	// 	group.bench_with_input(
-	// 		BenchmarkId::new("litep2p/serially", label),
-	// 		&(size, limit),
-	// 		|b, &(size, limit)| {
-	// 			b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit));
-	// 		},
-	// 	);
-	// }
+	for &(exponent, label) in PAYLOAD.iter() {
+		let size = 2usize.pow(exponent);
+		group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_REQUESTS));
+		});
+	}
 	drop(litep2p_setup);
 }
 
-fn run_benchmark_with_small_payload(c: &mut Criterion) {
-	run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload");
-}
-
-fn run_benchmark_with_large_payload(c: &mut Criterion) {
-	run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload");
-}
-
-criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload);
+criterion_group!(benches, run_benchmark);
 criterion_main!(benches);
diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs
index d270e90efdf565f920748e18db992300a7589604..2d4a117d15631fea4dc5cdf0643fb5bbae06c66e 100644
--- a/substrate/client/network/src/litep2p/service.rs
+++ b/substrate/client/network/src/litep2p/service.rs
@@ -28,8 +28,8 @@ use crate::{
 	peer_store::PeerStoreProvider,
 	service::out_events,
 	Event, IfDisconnected, NetworkDHTProvider, NetworkEventStream, NetworkPeers, NetworkRequest,
-	NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, ProtocolName,
-	RequestFailure, Signature,
+	NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, OutboundFailure,
+	ProtocolName, RequestFailure, Signature,
 };
 
 use codec::DecodeAll;
@@ -526,13 +526,23 @@ impl NetworkStateInfo for Litep2pNetworkService {
 impl NetworkRequest for Litep2pNetworkService {
 	async fn request(
 		&self,
-		_target: PeerId,
-		_protocol: ProtocolName,
-		_request: Vec<u8>,
-		_fallback_request: Option<(Vec<u8>, ProtocolName)>,
-		_connect: IfDisconnected,
+		target: PeerId,
+		protocol: ProtocolName,
+		request: Vec<u8>,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		connect: IfDisconnected,
 	) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
-		unimplemented!();
+		let (tx, rx) = oneshot::channel();
+
+		self.start_request(target, protocol, request, fallback_request, tx, connect);
+
+		match rx.await {
+			Ok(v) => v,
+			// The channel can only be closed if the network worker no longer exists. If the
+			// network worker no longer exists, then all connections to `target` are necessarily
+			// closed, and we legitimately report this situation as a "ConnectionClosed".
+			Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)),
+		}
 	}
 
 	fn start_request(
diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index 6da1d601b34fc9c1ec4234df3308dbcb9750e895..81e1848adefa277d86e6bd05c8941bbda13e8cda 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -34,7 +34,7 @@ use libp2p::{
 	},
 	Multiaddr, PeerId,
 };
-use log::warn;
+use log::{debug, warn};
 
 use codec::DecodeAll;
 use sc_network_common::role::Roles;
@@ -53,6 +53,9 @@ mod notifications;
 
 pub mod message;
 
+// Log target for this file.
+const LOG_TARGET: &str = "sub-libp2p";
+
 /// Maximum size used for notifications in the block announce and transaction protocols.
 // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`.
 pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = MAX_RESPONSE_SIZE;
@@ -124,6 +127,10 @@ impl<B: BlockT> Protocol<B> {
 				handle.set_metrics(notification_metrics.clone());
 			});
 
+			protocol_configs.iter().enumerate().for_each(|(i, (p, _, _))| {
+				debug!(target: LOG_TARGET, "Notifications protocol {:?}: {}", SetId::from(i), p.name);
+			});
+
 			(
 				Notifications::new(
 					protocol_controller_handles,
@@ -164,7 +171,7 @@ impl<B: BlockT> Protocol<B> {
 		{
 			self.behaviour.disconnect_peer(peer_id, SetId::from(position));
 		} else {
-			warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name")
+			warn!(target: LOG_TARGET, "disconnect_peer() with invalid protocol name")
 		}
 	}
 
diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs
index df93788696e381b18e80cff73646bb1594636386..586a15cadd68eeb8d0615c01c7bbf33177a5113a 100644
--- a/substrate/client/network/statement/src/lib.rs
+++ b/substrate/client/network/statement/src/lib.rs
@@ -33,7 +33,8 @@ use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt}
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonReservedPeerMode, SetConfig},
-	error, multiaddr,
+	error,
+	multiaddr::{Multiaddr, Protocol},
 	peer_store::PeerStoreProvider,
 	service::{
 		traits::{NotificationEvent, NotificationService, ValidationResult},
@@ -296,9 +297,19 @@ where
 
 	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
-			SyncEvent::PeerConnected(remote) => {
-				let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
-					.collect::<multiaddr::Multiaddr>();
+			SyncEvent::InitialPeers(peer_ids) => {
+				let addrs = peer_ids
+					.into_iter()
+					.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+					.collect();
+				let result =
+					self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs);
+				if let Err(err) = result {
+					log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err);
+				}
+			},
+			SyncEvent::PeerConnected(peer_id) => {
+				let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into()));
 				let result = self.network.add_peers_to_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(addr).collect(),
@@ -307,10 +318,10 @@ where
 					log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err);
 				}
 			},
-			SyncEvent::PeerDisconnected(remote) => {
+			SyncEvent::PeerDisconnected(peer_id) => {
 				let result = self.network.remove_peers_from_reserved_set(
 					self.protocol_name.clone(),
-					iter::once(remote).collect(),
+					iter::once(peer_id).collect(),
 				);
 				if let Err(err) = result {
 					log::error!(target: LOG_TARGET, "Failed to remove reserved peer: {err}");
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index 0c39ea0b93c040c17870fb7011fe667d4af0317c..4003361525e18c45d73cb95c20376c337251cade 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -656,7 +656,11 @@ where
 			ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
 				self.strategy.set_sync_fork_request(peers, &hash, number);
 			},
-			ToServiceCommand::EventStream(tx) => self.event_streams.push(tx),
+			ToServiceCommand::EventStream(tx) => {
+				let _ = tx
+					.unbounded_send(SyncEvent::InitialPeers(self.peers.keys().cloned().collect()));
+				self.event_streams.push(tx);
+			},
 			ToServiceCommand::RequestJustification(hash, number) =>
 				self.strategy.request_justification(&hash, number),
 			ToServiceCommand::ClearJustificationRequests =>
diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs
index 5745a34378df68f65446953b55c8a0ce39f3c3d0..a72a2f7c1ffe475fc4c78263a0766be484af7663 100644
--- a/substrate/client/network/sync/src/types.rs
+++ b/substrate/client/network/sync/src/types.rs
@@ -127,6 +127,10 @@ where
 
 /// Syncing-related events that other protocols can subscribe to.
 pub enum SyncEvent {
+	/// All connected peers that the syncing implementation is tracking.
+	/// Always sent as the first message to the stream.
+	InitialPeers(Vec<PeerId>),
+
 	/// Peer that the syncing implementation is tracking connected.
 	PeerConnected(PeerId),
 
diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs
index 44fa702ef6d4f7bff2e4046728508922ab1fa00a..49f429a04ee2e61861d3871cfc042df92e23c5c0 100644
--- a/substrate/client/network/transactions/src/lib.rs
+++ b/substrate/client/network/transactions/src/lib.rs
@@ -35,7 +35,8 @@ use log::{debug, trace, warn};
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonReservedPeerMode, ProtocolId, SetConfig},
-	error, multiaddr,
+	error,
+	multiaddr::{Multiaddr, Protocol},
 	peer_store::PeerStoreProvider,
 	service::{
 		traits::{NotificationEvent, NotificationService, ValidationResult},
@@ -377,9 +378,19 @@ where
 
 	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
-			SyncEvent::PeerConnected(remote) => {
-				let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
-					.collect::<multiaddr::Multiaddr>();
+			SyncEvent::InitialPeers(peer_ids) => {
+				let addrs = peer_ids
+					.into_iter()
+					.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+					.collect();
+				let result =
+					self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs);
+				if let Err(err) = result {
+					log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err);
+				}
+			},
+			SyncEvent::PeerConnected(peer_id) => {
+				let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into()));
 				let result = self.network.add_peers_to_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(addr).collect(),
@@ -388,10 +399,10 @@ where
 					log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err);
 				}
 			},
-			SyncEvent::PeerDisconnected(remote) => {
+			SyncEvent::PeerDisconnected(peer_id) => {
 				let result = self.network.remove_peers_from_reserved_set(
 					self.protocol_name.clone(),
-					iter::once(remote).collect(),
+					iter::once(peer_id).collect(),
 				);
 				if let Err(err) = result {
 					log::error!(target: LOG_TARGET, "Remove reserved peer failed: {}", err);
diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml
index c7eed77eda7f0d23afeb4ebfe94597e5f61feeff..afc464c358811191f6deaa5afe1e6fdc90d269c8 100644
--- a/substrate/client/sysinfo/Cargo.toml
+++ b/substrate/client/sysinfo/Cargo.toml
@@ -30,7 +30,6 @@ serde_json = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-crypto-hashing = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [dev-dependencies]
 sp-runtime = { workspace = true, default-features = true }
diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs
index 5e40b0fb72d6b98cf28a5afdfd6eec962282812d..5ba9dd40c15680ee2c5c83803f8bc62b59e96c57 100644
--- a/substrate/client/transaction-pool/benches/basics.rs
+++ b/substrate/client/transaction-pool/benches/basics.rs
@@ -197,14 +197,22 @@ fn benchmark_main(c: &mut Criterion) {
 	c.bench_function("sequential 50 tx", |b| {
 		b.iter(|| {
 			let api = Arc::from(TestApi::new_dependant());
-			bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api);
+			bench_configured(
+				Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()),
+				50,
+				api,
+			);
 		});
 	});
 
 	c.bench_function("random 100 tx", |b| {
 		b.iter(|| {
 			let api = Arc::from(TestApi::default());
-			bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api);
+			bench_configured(
+				Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()),
+				100,
+				api,
+			);
 		});
 	});
 }
diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs
index b00cf5fbfede903b3adeeaf13bc8aa28281cd633..7f2cbe24d8ef62bae4ccb4ffda8d974d92c38c97 100644
--- a/substrate/client/transaction-pool/src/common/tests.rs
+++ b/substrate/client/transaction-pool/src/common/tests.rs
@@ -222,5 +222,5 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic {
 
 pub(crate) fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
 	let api = Arc::new(TestApi::default());
-	(Pool::new(Default::default(), true.into(), api.clone()), api)
+	(Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api)
 }
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
index 7679e3b169d2e6e757e253d9b8a374851eaec751..d69aa37c94a1acf3f8f37622d06f9f4e6209e43e 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
@@ -329,14 +329,14 @@ where
 		let stream_map = futures::stream::unfold(ctx, |mut ctx| async move {
 			loop {
 				if let Some(dropped) = ctx.get_pending_dropped_transaction() {
-					debug!("dropped_watcher: sending out (pending): {dropped:?}");
+					trace!("dropped_watcher: sending out (pending): {dropped:?}");
 					return Some((dropped, ctx));
 				}
 				tokio::select! {
 					biased;
 					Some(event) = next_event(&mut ctx.stream_map) => {
 						if let Some(dropped) = ctx.handle_event(event.0, event.1) {
-							debug!("dropped_watcher: sending out: {dropped:?}");
+							trace!("dropped_watcher: sending out: {dropped:?}");
 							return Some((dropped, ctx));
 						}
 					},
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
index 4ec87f1fefa40ba043557a000b1d294c2e614127..e57256943ccfe37d771e9f7de2d4f17b2798c919 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
@@ -318,7 +318,7 @@ where
 			pool_api.clone(),
 			listener.clone(),
 			metrics.clone(),
-			TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count),
+			TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(),
 			options.ready.total_bytes + options.future.total_bytes,
 		));
 
diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs
index ff9cc1541af4cd2c63102f7c12e67c522205dd08..4c0ace0b1c73a65072b979a533f8e72c5f2e825b 100644
--- a/substrate/client/transaction-pool/src/graph/pool.rs
+++ b/substrate/client/transaction-pool/src/graph/pool.rs
@@ -158,6 +158,13 @@ impl Default for Options {
 	}
 }
 
+impl Options {
+	/// Maximal total number of transactions (ready + future) in the pool.
+	pub fn total_count(&self) -> usize {
+		self.ready.count + self.future.count
+	}
+}
+
 /// Should we check that the transaction is banned
 /// in the pool, before we verify it?
 #[derive(Copy, Clone)]
@@ -172,6 +179,21 @@ pub struct Pool<B: ChainApi> {
 }
 
 impl<B: ChainApi> Pool<B> {
+	/// Create a new transaction pool with statically sized rotator.
+	pub fn new_with_staticly_sized_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+	) -> Self {
+		Self {
+			validated_pool: Arc::new(ValidatedPool::new_with_staticly_sized_rotator(
+				options,
+				is_validator,
+				api,
+			)),
+		}
+	}
+
 	/// Create a new transaction pool.
 	pub fn new(options: Options, is_validator: IsValidator, api: Arc<B>) -> Self {
 		Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) }
@@ -284,6 +306,7 @@ impl<B: ChainApi> Pool<B> {
 		let mut validated_counter: usize = 0;
 
 		let mut future_tags = Vec::new();
+		let now = Instant::now();
 		for (extrinsic, in_pool_tags) in all {
 			match in_pool_tags {
 				// reuse the tags for extrinsics that were found in the pool
@@ -319,7 +342,7 @@ impl<B: ChainApi> Pool<B> {
 			}
 		}
 
-		log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}");
+		log::debug!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}, took:{:?}", now.elapsed());
 
 		self.prune_tags(at, future_tags, in_pool_hashes).await
 	}
@@ -351,6 +374,7 @@ impl<B: ChainApi> Pool<B> {
 		tags: impl IntoIterator<Item = Tag>,
 		known_imported_hashes: impl IntoIterator<Item = ExtrinsicHash<B>> + Clone,
 	) {
+		let now = Instant::now();
 		log::trace!(target: LOG_TARGET, "Pruning at {:?}", at);
 		// Prune all transactions that provide given tags
 		let prune_status = self.validated_pool.prune_tags(tags);
@@ -369,9 +393,8 @@ impl<B: ChainApi> Pool<B> {
 		let reverified_transactions =
 			self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await;
 
-		let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect();
-
-		log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}", &at, reverified_transactions.len());
+		let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::<Vec<_>>();
+		log::debug!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed());
 		log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}");
 
 		// And finally - submit reverified transactions back to the pool
@@ -580,7 +603,7 @@ mod tests {
 	fn should_reject_unactionable_transactions() {
 		// given
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(
+		let pool = Pool::new_with_staticly_sized_rotator(
 			Default::default(),
 			// the node does not author blocks
 			false.into(),
@@ -767,7 +790,7 @@ mod tests {
 		let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(options, true.into(), api.clone());
+		let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 		let hash1 =
 			block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap();
@@ -803,7 +826,7 @@ mod tests {
 		let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(options, true.into(), api.clone());
+		let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 		// when
 		block_on(
@@ -1036,7 +1059,7 @@ mod tests {
 				Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 			let api = Arc::new(TestApi::default());
-			let pool = Pool::new(options, true.into(), api.clone());
+			let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 			let xt = uxt(Transfer {
 				from: Alice.into(),
@@ -1074,7 +1097,7 @@ mod tests {
 					Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 				let api = Arc::new(TestApi::default());
-				let pool = Pool::new(options, true.into(), api.clone());
+				let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 				// after validation `IncludeData` will have priority set to 9001
 				// (validate_transaction mock)
@@ -1106,7 +1129,7 @@ mod tests {
 					Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 				let api = Arc::new(TestApi::default());
-				let pool = Pool::new(options, true.into(), api.clone());
+				let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 				let han_of_block0 = api.expect_hash_and_number(0);
 
@@ -1151,7 +1174,11 @@ mod tests {
 			let mut api = TestApi::default();
 			api.delay = Arc::new(Mutex::new(rx.into()));
 			let api = Arc::new(api);
-			let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+			let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+				Default::default(),
+				true.into(),
+				api.clone(),
+			));
 
 			let han_of_block0 = api.expect_hash_and_number(0);
 
diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs
index 9a2e269b5eede0cde2fb1d19165bf04fa1790dff..80d8f24144c8a02cd086bd2c765ea29055979483 100644
--- a/substrate/client/transaction-pool/src/graph/rotator.rs
+++ b/substrate/client/transaction-pool/src/graph/rotator.rs
@@ -31,7 +31,10 @@ use std::{
 use super::base_pool::Transaction;
 
 /// Expected size of the banned extrinsics cache.
-const EXPECTED_SIZE: usize = 2048;
+const DEFAULT_EXPECTED_SIZE: usize = 2048;
+
+/// The default duration, in seconds, for which an extrinsic is banned.
+const DEFAULT_BAN_TIME_SECS: u64 = 30 * 60;
 
 /// Pool rotator is responsible to only keep fresh extrinsics in the pool.
 ///
@@ -42,18 +45,39 @@ pub struct PoolRotator<Hash> {
 	ban_time: Duration,
 	/// Currently banned extrinsics.
 	banned_until: RwLock<HashMap<Hash, Instant>>,
+	/// Expected size of the banned extrinsics cache.
+	expected_size: usize,
+}
+
+impl<Hash: Clone> Clone for PoolRotator<Hash> {
+	fn clone(&self) -> Self {
+		Self {
+			ban_time: self.ban_time,
+			banned_until: RwLock::new(self.banned_until.read().clone()),
+			expected_size: self.expected_size,
+		}
+	}
 }
 
 impl<Hash: hash::Hash + Eq> Default for PoolRotator<Hash> {
 	fn default() -> Self {
-		Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() }
+		Self {
+			ban_time: Duration::from_secs(DEFAULT_BAN_TIME_SECS),
+			banned_until: Default::default(),
+			expected_size: DEFAULT_EXPECTED_SIZE,
+		}
 	}
 }
 
 impl<Hash: hash::Hash + Eq + Clone> PoolRotator<Hash> {
 	/// New rotator instance with specified ban time.
 	pub fn new(ban_time: Duration) -> Self {
-		Self { ban_time, banned_until: Default::default() }
+		Self { ban_time, ..Self::default() }
+	}
+
+	/// New rotator instance with specified ban time and expected cache size.
+	pub fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self {
+		Self { expected_size, ..Self::new(ban_time) }
 	}
 
 	/// Returns `true` if extrinsic hash is currently banned.
@@ -69,8 +93,8 @@ impl<Hash: hash::Hash + Eq + Clone> PoolRotator<Hash> {
 			banned.insert(hash, *now + self.ban_time);
 		}
 
-		if banned.len() > 2 * EXPECTED_SIZE {
-			while banned.len() > EXPECTED_SIZE {
+		if banned.len() > 2 * self.expected_size {
+			while banned.len() > self.expected_size {
 				if let Some(key) = banned.keys().next().cloned() {
 					banned.remove(&key);
 				}
@@ -201,16 +225,16 @@ mod tests {
 		let past_block = 0;
 
 		// when
-		for i in 0..2 * EXPECTED_SIZE {
+		for i in 0..2 * DEFAULT_EXPECTED_SIZE {
 			let tx = tx_with(i as u64, past_block);
 			assert!(rotator.ban_if_stale(&now, past_block, &tx));
 		}
-		assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE);
+		assert_eq!(rotator.banned_until.read().len(), 2 * DEFAULT_EXPECTED_SIZE);
 
 		// then
-		let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block);
+		let tx = tx_with(2 * DEFAULT_EXPECTED_SIZE as u64, past_block);
 		// trigger a garbage collection
 		assert!(rotator.ban_if_stale(&now, past_block, &tx));
-		assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE);
+		assert_eq!(rotator.banned_until.read().len(), DEFAULT_EXPECTED_SIZE);
 	}
 }
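
Editor's note: the hunk above replaces the hard-coded `EXPECTED_SIZE` threshold with the instance's `expected_size`, keeping the same hysteresis: garbage-collect only once the ban cache doubles its expected size, then trim back down. A minimal standalone sketch of that eviction policy (illustrative names, not the pool's API) is:

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::time::Instant;

struct BanCache<K> {
    banned_until: HashMap<K, Instant>,
    expected_size: usize,
}

impl<K: Hash + Eq + Clone> BanCache<K> {
    fn ban(&mut self, key: K, until: Instant) {
        self.banned_until.insert(key, until);
        // Collect garbage only once the map grows past twice the expected size,
        // then trim back down to `expected_size`, evicting arbitrary entries.
        if self.banned_until.len() > 2 * self.expected_size {
            while self.banned_until.len() > self.expected_size {
                if let Some(key) = self.banned_until.keys().next().cloned() {
                    self.banned_until.remove(&key);
                } else {
                    break;
                }
            }
        }
    }
}
```
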
diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs
index 14df63d9673e374503f909bdef28fc23765ef50a..3f7bf4773de7b2654a794382e4656d4de09ed702 100644
--- a/substrate/client/transaction-pool/src/graph/validated_pool.rs
+++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs
@@ -121,16 +121,41 @@ impl<B: ChainApi> Clone for ValidatedPool<B> {
 			listener: Default::default(),
 			pool: RwLock::from(self.pool.read().clone()),
 			import_notification_sinks: Default::default(),
-			rotator: PoolRotator::default(),
+			rotator: self.rotator.clone(),
 		}
 	}
 }
 
 impl<B: ChainApi> ValidatedPool<B> {
+	/// Create a new transaction pool with statically sized rotator.
+	pub fn new_with_staticly_sized_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+	) -> Self {
+		let ban_time = options.ban_time;
+		Self::new_with_rotator(options, is_validator, api, PoolRotator::new(ban_time))
+	}
+
 	/// Create a new transaction pool.
 	pub fn new(options: Options, is_validator: IsValidator, api: Arc<B>) -> Self {
-		let base_pool = base::BasePool::new(options.reject_future_transactions);
 		let ban_time = options.ban_time;
+		let total_count = options.total_count();
+		Self::new_with_rotator(
+			options,
+			is_validator,
+			api,
+			PoolRotator::new_with_expected_size(ban_time, total_count),
+		)
+	}
+
+	fn new_with_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+		rotator: PoolRotator<ExtrinsicHash<B>>,
+	) -> Self {
+		let base_pool = base::BasePool::new(options.reject_future_transactions);
 		Self {
 			is_validator,
 			options,
@@ -138,7 +163,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 			api,
 			pool: RwLock::new(base_pool),
 			import_notification_sinks: Default::default(),
-			rotator: PoolRotator::new(ban_time),
+			rotator,
 		}
 	}
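
Editor's note: the constructor split above keeps one private `new_with_rotator` and two public fronts, one sizing the rotator from the pool limits and one preserving the old fixed size. A self-contained sketch of the same delegation pattern, assuming `total_count()` is roughly the configured ready-plus-future transaction limit (the exact definition lives in `Options`):

```rust
#[derive(Default, Clone, Copy)]
struct Options { ready_count: usize, future_count: usize }

impl Options {
    // Assumed stand-in for the `total_count()` called in the diff above.
    fn total_count(&self) -> usize { self.ready_count + self.future_count }
}

struct Rotator { expected_size: usize }

impl Rotator {
    fn fixed() -> Self { Self { expected_size: 2048 } }
    fn sized(expected_size: usize) -> Self { Self { expected_size } }
}

struct Pool { rotator: Rotator }

impl Pool {
    /// Rotator sized to the configured pool limits (the new default path).
    fn new(options: Options) -> Self {
        let total = options.total_count();
        Self::new_with_rotator(options, Rotator::sized(total))
    }

    /// Legacy behaviour: fixed-size rotator, used by the single-state pool and tests.
    fn new_with_fixed_rotator(options: Options) -> Self {
        Self::new_with_rotator(options, Rotator::fixed())
    }

    fn new_with_rotator(_options: Options, rotator: Rotator) -> Self {
        Self { rotator }
    }
}
```
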
 
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
index f22fa2ddabdee8233a40e46a5fb952407d845088..caa09585b28bff894ab7c2476facad55761b2f69 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
@@ -384,7 +384,11 @@ mod tests {
 	#[test]
 	fn revalidation_queue_works() {
 		let api = Arc::new(TestApi::default());
-		let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+		let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+			Default::default(),
+			true.into(),
+			api.clone(),
+		));
 		let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone()));
 
 		let uxt = uxt(Transfer {
@@ -414,7 +418,11 @@ mod tests {
 	#[test]
 	fn revalidation_queue_skips_revalidation_for_unknown_block_hash() {
 		let api = Arc::new(TestApi::default());
-		let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+		let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+			Default::default(),
+			true.into(),
+			api.clone(),
+		));
 		let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone()));
 
 		let uxt0 = uxt(Transfer {
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
index e7504012ca67be2bae6e14ad2406df8bdb968f86..2b32704945c759f4ae7a6f473a54e5098d6685ad 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
@@ -141,7 +141,11 @@ where
 		finalized_hash: Block::Hash,
 		options: graph::Options,
 	) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
-		let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone()));
+		let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator(
+			options,
+			true.into(),
+			pool_api.clone(),
+		));
 		let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(
 			pool_api.clone(),
 			pool.clone(),
@@ -177,7 +181,11 @@ where
 		best_block_hash: Block::Hash,
 		finalized_hash: Block::Hash,
 	) -> Self {
-		let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone()));
+		let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator(
+			options,
+			is_validator,
+			pool_api.clone(),
+		));
 		let (revalidation_queue, background_task) = match revalidation_type {
 			RevalidationType::Light =>
 				(revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None),
diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs
index 8bf08122995c1a2c270688d8294266b403a27302..dd82c52a6047b865c516ddf99197bec638cbfe48 100644
--- a/substrate/client/transaction-pool/tests/fatp.rs
+++ b/substrate/client/transaction-pool/tests/fatp.rs
@@ -2199,7 +2199,7 @@ fn import_sink_works3() {
 		pool.submit_one(genesis, SOURCE, xt1.clone()),
 	];
 
-	let x = block_on(futures::future::join_all(submissions));
+	block_on(futures::future::join_all(submissions));
 
 	let header01a = api.push_block(1, vec![], true);
 	let header01b = api.push_block(1, vec![], true);
@@ -2213,8 +2213,6 @@ fn import_sink_works3() {
 	assert_pool_status!(header01a.hash(), &pool, 1, 1);
 	assert_pool_status!(header01b.hash(), &pool, 1, 1);
 
-	log::debug!("xxx {x:#?}");
-
 	let import_events =
 		futures::executor::block_on_stream(import_stream).take(1).collect::<Vec<_>>();
 
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index 20997606c607cfd71e106311e5c0b3c5f3f9b128..de35726435f0f1c53bbc1e4a0cd758f8121b0530 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -49,7 +49,7 @@ const LOG_TARGET: &str = "txpool";
 
 fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
 	let api = Arc::new(TestApi::with_alice_nonce(209));
-	(Pool::new(Default::default(), true.into(), api.clone()), api)
+	(Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api)
 }
 
 fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
@@ -224,7 +224,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() {
 	api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| {
 		v.provides.push(vec![155]);
 	}));
-	let pool = Pool::new(Default::default(), true.into(), api.clone());
+	let pool = Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone());
 	let xt0 = Arc::from(uxt(Alice, 209));
 	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone()))
 		.expect("1. Imported");
diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml
index 99b203e73fb0475569ab35823cff685454b65d68..e3215803a02010e722873a39883197d466535886 100644
--- a/substrate/frame/bags-list/remote-tests/Cargo.toml
+++ b/substrate/frame/bags-list/remote-tests/Cargo.toml
@@ -26,7 +26,6 @@ pallet-staking = { workspace = true, default-features = true }
 # core
 sp-core = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 sp-storage = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 
diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs
index 23feb46b72ca02364ae05f1605cff5477b4234c7..bc7e77c191db86dd2805f200c3ceb17a293fce73 100644
--- a/substrate/frame/balances/src/impl_currency.rs
+++ b/substrate/frame/balances/src/impl_currency.rs
@@ -632,7 +632,7 @@ where
 	///
 	/// This is `Polite` and thus will not repatriate any funds which would lead the total balance
 	/// to be less than the frozen amount. Returns `Ok` with the actual amount of funds moved,
-	/// which may be less than `value` since the operation is done an a `BestEffort` basis.
+	/// which may be less than `value` since the operation is done on a `BestEffort` basis.
 	fn repatriate_reserved(
 		slashed: &T::AccountId,
 		beneficiary: &T::AccountId,
diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs
index 64f93b22cf1b50c1f463b5e48f5c51cc3e3b719b..99aad0301c12760c8c75881b7c2f2dac84c0767d 100644
--- a/substrate/frame/benchmarking/src/v1.rs
+++ b/substrate/frame/benchmarking/src/v1.rs
@@ -1894,7 +1894,7 @@ macro_rules! add_benchmark {
 /// This macro allows users to easily generate a list of benchmarks for the pallets configured
 /// in the runtime.
 ///
-/// To use this macro, first create a an object to store the list:
+/// To use this macro, first create an object to store the list:
 ///
 /// ```ignore
 /// let mut list = Vec::<BenchmarkList>::new();
diff --git a/substrate/frame/bounties/README.md b/substrate/frame/bounties/README.md
index 232334cb1edd64ea8aab141055457c494a352073..2293ae161e28e947daab9685ef35d35ebdacb1f5 100644
--- a/substrate/frame/bounties/README.md
+++ b/substrate/frame/bounties/README.md
@@ -19,7 +19,7 @@ curator or once the bounty is active or payout is pending, resulting in the slas
 curator's deposit.
 
 This pallet may opt into using a [`ChildBountyManager`] that enables bounties to be split into
-sub-bounties, as children of anh established bounty (called the parent in the context of it's
+sub-bounties, as children of an established bounty (called the parent in the context of its
 children).
 
 > NOTE: The parent bounty cannot be closed if it has a non-zero number of it has active child
diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs
index 1e931958898da8820a590fcb76014f1534620f32..b5155909e3cde0f6702bdb2174d7fea4d06cdede 100644
--- a/substrate/frame/bounties/src/benchmarking.rs
+++ b/substrate/frame/bounties/src/benchmarking.rs
@@ -15,9 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! bounties pallet benchmarking.
-
-#![cfg(feature = "runtime-benchmarks")]
+//! Bounties pallet benchmarking.
 
 use super::*;
 
@@ -37,6 +35,16 @@ fn set_block_number<T: Config<I>, I: 'static>(n: BlockNumberFor<T, I>) {
 	<T as pallet_treasury::Config<I>>::BlockNumberProvider::set_block_number(n);
 }
 
+fn minimum_balance<T: Config<I>, I: 'static>() -> BalanceOf<T, I> {
+	let minimum_balance = T::Currency::minimum_balance();
+
+	if minimum_balance.is_zero() {
+		1u32.into()
+	} else {
+		minimum_balance
+	}
+}
+
 // Create bounties that are approved for use in `on_initialize`.
 fn create_approved_bounties<T: Config<I>, I: 'static>(n: u32) -> Result<(), BenchmarkError> {
 	for i in 0..n {
@@ -62,12 +70,10 @@ fn setup_bounty<T: Config<I>, I: 'static>(
 	let fee = value / 2u32.into();
 	let deposit = T::BountyDepositBase::get() +
 		T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into();
-	let _ = T::Currency::make_free_balance_be(&caller, deposit + T::Currency::minimum_balance());
+	let _ = T::Currency::make_free_balance_be(&caller, deposit + minimum_balance::<T, I>());
 	let curator = account("curator", u, SEED);
-	let _ = T::Currency::make_free_balance_be(
-		&curator,
-		fee / 2u32.into() + T::Currency::minimum_balance(),
-	);
+	let _ =
+		T::Currency::make_free_balance_be(&curator, fee / 2u32.into() + minimum_balance::<T, I>());
 	let reason = vec![0; d as usize];
 	(caller, curator, fee, value, reason)
 }
@@ -91,7 +97,7 @@ fn create_bounty<T: Config<I>, I: 'static>(
 
 fn setup_pot_account<T: Config<I>, I: 'static>() {
 	let pot_account = Bounties::<T, I>::account_id();
-	let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into());
+	let value = minimum_balance::<T, I>().saturating_mul(1_000_000_000u32.into());
 	let _ = T::Currency::make_free_balance_be(&pot_account, value);
 }
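
Editor's note: the new `minimum_balance` helper exists so benchmark setup amounts stay non-zero on chains whose existential deposit is configured as zero. The same clamp over plain integers, purely for illustration (the pallet version works over `BalanceOf<T, I>` and `Zero::is_zero`):

```rust
fn minimum_balance(existential_deposit: u64) -> u64 {
    // Benchmarks fund accounts with `deposit + minimum_balance`, so the amount
    // must stay non-zero even when the chain's existential deposit is zero.
    if existential_deposit == 0 { 1 } else { existential_deposit }
}

fn main() {
    assert_eq!(minimum_balance(0), 1);   // zero-ED chains still get a usable amount
    assert_eq!(minimum_balance(10), 10); // otherwise the configured ED is used as-is
}
```
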
 
diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs
index 729c76b5cc750752ee6cd9d9f1906471fa29e7f8..9b6e3c06e91419f3b584ff4e87dfcbf087a2857a 100644
--- a/substrate/frame/bounties/src/lib.rs
+++ b/substrate/frame/bounties/src/lib.rs
@@ -36,7 +36,7 @@
 //! curator's deposit.
 //!
 //! This pallet may opt into using a [`ChildBountyManager`] that enables bounties to be split into
-//! sub-bounties, as children of anh established bounty (called the parent in the context of it's
+//! sub-bounties, as children of an established bounty (called the parent in the context of its
 //! children).
 //!
 //! > NOTE: The parent bounty cannot be closed if it has a non-zero number of it has active child
@@ -84,6 +84,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+#[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 pub mod migrations;
 mod tests;
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index e39128639e3e06e289684ee90c7a14448ac50ce1..5784e6dd1553382e9ad2a27261243e7dd987fdf9 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -50,7 +50,6 @@ sp-api = { workspace = true }
 sp-core = { workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { workspace = true }
-sp-std = { workspace = true }
 
 xcm = { workspace = true }
 xcm-builder = { workspace = true }
@@ -98,7 +97,6 @@ std = [
 	"sp-io/std",
 	"sp-keystore/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"wasm-instrument?/std",
 	"wasmi/std",
 	"xcm-builder/std",
diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs
index 4aba1d24dbd5889e925fb9bbab53cf600642ab77..5c3c34e6ef32d06a567fc2b7f1373080c66a79ca 100644
--- a/substrate/frame/contracts/proc-macro/src/lib.rs
+++ b/substrate/frame/contracts/proc-macro/src/lib.rs
@@ -650,10 +650,9 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 {
 				let result = #body;
 				if ::log::log_enabled!(target: "runtime::contracts::strace", ::log::Level::Trace) {
 						use core::fmt::Write;
-						let mut w = sp_std::Writer::default();
-						let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result);
-						let msg = core::str::from_utf8(&w.inner()).unwrap_or_default();
-						ctx.ext().append_debug_buffer(msg);
+						let mut msg = alloc::string::String::default();
+						let _ = core::write!(&mut msg, #trace_fmt_str, #( #trace_fmt_args, )* result);
+						ctx.ext().append_debug_buffer(&msg);
 				}
 				result
 			}
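
Editor's note: the macro expansion above now formats trace output into an `alloc` `String` via `core::fmt::Write` instead of the removed `sp_std::Writer`. A minimal sketch of that pattern outside the macro (the format string and function name here are illustrative):

```rust
extern crate alloc;
use core::fmt::Write;

fn trace_line(result: u32) -> alloc::string::String {
    let mut msg = alloc::string::String::default();
    // `core::write!` works on anything implementing `core::fmt::Write`,
    // so no bespoke writer type is needed once `alloc` is in scope.
    let _ = core::write!(&mut msg, "result = {:?}", result);
    msg
}

fn main() {
    assert_eq!(trace_line(7), "result = 7");
}
```
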
diff --git a/substrate/frame/contracts/src/transient_storage.rs b/substrate/frame/contracts/src/transient_storage.rs
index c795a966385a92572958cf67202e4d7f5b36e85c..c9b1dac1ad75299ebb51bb3b8ca9cdbf7dc69442 100644
--- a/substrate/frame/contracts/src/transient_storage.rs
+++ b/substrate/frame/contracts/src/transient_storage.rs
@@ -22,11 +22,11 @@ use crate::{
 	storage::WriteOutcome,
 	Config, Error,
 };
+use alloc::{collections::BTreeMap, vec::Vec};
 use codec::Encode;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, mem};
 use frame_support::DefaultNoBound;
 use sp_runtime::{DispatchError, DispatchResult, Saturating};
-use sp_std::{collections::btree_map::BTreeMap, mem, vec::Vec};
 
 /// Meter entry tracks transaction allocations.
 #[derive(Default, Debug)]
diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs
index adb8a4a091b8dcf473277763d992a632e3fb539a..ac0d489953c1fc279d34ed1ca8c74ba7b8a9ea31 100644
--- a/substrate/frame/core-fellowship/src/benchmarking.rs
+++ b/substrate/frame/core-fellowship/src/benchmarking.rs
@@ -50,6 +50,7 @@ mod benchmarks {
 		for _ in 0..rank {
 			T::Members::promote(&member)?;
 		}
+		#[allow(deprecated)]
 		CoreFellowship::<T, I>::import(RawOrigin::Signed(member.clone()).into())?;
 		Ok(member)
 	}
@@ -260,6 +261,23 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn import_member() -> Result<(), BenchmarkError> {
+		let member = account("member", 0, SEED);
+		let sender = account("sender", 0, SEED);
+
+		T::Members::induct(&member)?;
+		T::Members::promote(&member)?;
+
+		assert!(!Member::<T, I>::contains_key(&member));
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(sender), member.clone());
+
+		assert!(Member::<T, I>::contains_key(&member));
+		Ok(())
+	}
+
 	#[benchmark]
 	fn approve() -> Result<(), BenchmarkError> {
 		let member = make_member::<T, I>(1)?;
diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs
index c61447e36280a93e0ce553116c8b868d606061a3..22ba63b26161d45efea8189e60d24bf0cbb9e04b 100644
--- a/substrate/frame/core-fellowship/src/lib.rs
+++ b/substrate/frame/core-fellowship/src/lib.rs
@@ -21,6 +21,7 @@
 //! This only handles members of non-zero rank.
 //!
 //! # Process Flow
+//!
 //! - Begin with a call to `induct`, where some privileged origin (perhaps a pre-existing member of
 //!   `rank > 1`) is able to make a candidate from an account and introduce it to be tracked in this
 //!   pallet in order to allow evidence to be submitted and promotion voted on.
@@ -36,8 +37,9 @@
 //!   `bump` to demote the candidate by one rank.
 //! - If a candidate fails to be promoted to a member within the `offboard_timeout` period, then
 //!   anyone may call `bump` to remove the account's candidacy.
-//! - Pre-existing members may call `import` to have their rank recognised and be inducted into this
-//!   pallet (to gain a salary and allow for eventual promotion).
+//! - Pre-existing members may call `import_member` on themselves (formerly `import`) to have their
+//!   rank recognised and be inducted into this pallet (to gain a salary and allow for eventual
+//!   promotion).
 //! - If, externally to this pallet, a member or candidate has their rank removed completely, then
 //!   `offboard` may be called to remove them entirely from this pallet.
 //!
@@ -585,28 +587,44 @@ pub mod pallet {
 			Ok(if replaced { Pays::Yes } else { Pays::No }.into())
 		}
 
-		/// Introduce an already-ranked individual of the collective into this pallet. The rank may
-		/// still be zero.
+		/// Introduce an already-ranked individual of the collective into this pallet.
 		///
-		/// This resets `last_proof` to the current block and `last_promotion` will be set to zero,
-		/// thereby delaying any automatic demotion but allowing immediate promotion.
+		/// The rank may still be zero. This resets `last_proof` to the current block and
+		/// `last_promotion` will be set to zero, thereby delaying any automatic demotion but
+		/// allowing immediate promotion.
 		///
 		/// - `origin`: A signed origin of a ranked, but not tracked, account.
 		#[pallet::weight(T::WeightInfo::import())]
 		#[pallet::call_index(8)]
+		#[deprecated = "Use `import_member` instead"]
+		#[allow(deprecated)] // Otherwise FRAME will complain about using something deprecated.
 		pub fn import(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
 			let who = ensure_signed(origin)?;
-			ensure!(!Member::<T, I>::contains_key(&who), Error::<T, I>::AlreadyInducted);
-			let rank = T::Members::rank_of(&who).ok_or(Error::<T, I>::Unranked)?;
+			Self::do_import(who)?;
 
-			let now = frame_system::Pallet::<T>::block_number();
-			Member::<T, I>::insert(
-				&who,
-				MemberStatus { is_active: true, last_promotion: 0u32.into(), last_proof: now },
-			);
-			Self::deposit_event(Event::<T, I>::Imported { who, rank });
+			Ok(Pays::No.into()) // Successful imports are free
+		}
 
-			Ok(Pays::No.into())
+		/// Introduce an already-ranked individual of the collective into this pallet.
+		///
+		/// The rank may still be zero. Can be called by anyone on any collective member - including
+		/// the sender.
+		///
+		/// This resets `last_proof` to the current block and `last_promotion` will be set to zero,
+		/// thereby delaying any automatic demotion but allowing immediate promotion.
+		///
+		/// - `origin`: A signed origin of a ranked, but not tracked, account.
+		/// - `who`: The account ID of the collective member to be inducted.
+		#[pallet::weight(T::WeightInfo::set_partial_params())]
+		#[pallet::call_index(11)]
+		pub fn import_member(
+			origin: OriginFor<T>,
+			who: T::AccountId,
+		) -> DispatchResultWithPostInfo {
+			ensure_signed(origin)?;
+			Self::do_import(who)?;
+
+			Ok(Pays::No.into()) // Successful imports are free
 		}
 
 		/// Set the parameters partially.
@@ -661,6 +679,24 @@ pub mod pallet {
 				}
 			}
 		}
+
+		/// Import `who` into the core-fellowship pallet.
+		///
+		/// `who` must be a member of the collective but *not* already imported.
+		pub(crate) fn do_import(who: T::AccountId) -> DispatchResult {
+			ensure!(!Member::<T, I>::contains_key(&who), Error::<T, I>::AlreadyInducted);
+			let rank = T::Members::rank_of(&who).ok_or(Error::<T, I>::Unranked)?;
+
+			let now = frame_system::Pallet::<T>::block_number();
+			Member::<T, I>::insert(
+				&who,
+				MemberStatus { is_active: true, last_promotion: 0u32.into(), last_proof: now },
+			);
+			Self::deposit_event(Event::<T, I>::Imported { who, rank });
+
+			Ok(())
+		}
+
 		/// Convert a rank into a `0..RANK_COUNT` index suitable for the arrays in Params.
 		///
 		/// Rank 1 becomes index 0, rank `RANK_COUNT` becomes index `RANK_COUNT - 1`. Any rank not
@@ -766,6 +802,7 @@ impl<T: Config<I>, I: 'static>
 	pallet_ranked_collective::BenchmarkSetup<<T as frame_system::Config>::AccountId> for Pallet<T, I>
 {
 	fn ensure_member(who: &<T as frame_system::Config>::AccountId) {
+		#[allow(deprecated)]
 		Self::import(frame_system::RawOrigin::Signed(who.clone()).into()).unwrap();
 	}
 }
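
Editor's note: the refactor above keeps `import` as a deprecated shim while the new `import_member` takes an explicit target, with both delegating to a shared `do_import`. A plain-Rust sketch of that shape (illustrative types, not the pallet's dispatchables):

```rust
use std::collections::HashSet;

struct Registry { members: HashSet<u64> }

impl Registry {
    #[deprecated = "Use `import_member` instead"]
    fn import(&mut self, caller: u64) -> Result<(), &'static str> {
        // Old entry point: the caller can only import themselves.
        self.do_import(caller)
    }

    fn import_member(&mut self, _caller: u64, who: u64) -> Result<(), &'static str> {
        // New entry point: anyone may import any ranked member, including themselves.
        self.do_import(who)
    }

    fn do_import(&mut self, who: u64) -> Result<(), &'static str> {
        // Shared logic: refuse double induction, mirroring `AlreadyInducted`.
        if !self.members.insert(who) {
            return Err("AlreadyInducted");
        }
        Ok(())
    }
}
```
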
diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs
index 7a48ed9783e7ba646aaa979b0aa3c20635ee6a54..b2149336547d3d9c7c43d00dbe5e0f61071d28a1 100644
--- a/substrate/frame/core-fellowship/src/tests/integration.rs
+++ b/substrate/frame/core-fellowship/src/tests/integration.rs
@@ -17,8 +17,10 @@
 
 //! Integration test together with the ranked-collective pallet.
 
+#![allow(deprecated)]
+
 use frame_support::{
-	assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types,
+	assert_noop, assert_ok, derive_impl, hypothetically, hypothetically_ok, ord_parameter_types,
 	pallet_prelude::Weight,
 	parameter_types,
 	traits::{ConstU16, EitherOf, IsInVec, MapSuccess, NoOpPoll, TryMapSuccess},
@@ -170,6 +172,37 @@ fn evidence(e: u32) -> Evidence<Test, ()> {
 		.expect("Static length matches")
 }
 
+#[test]
+fn import_simple_works() {
+	new_test_ext().execute_with(|| {
+		for i in 0u16..9 {
+			let acc = i as u64;
+
+			// Does not work yet
+			assert_noop!(CoreFellowship::import(signed(acc)), Error::<Test>::Unranked);
+			assert_noop!(
+				CoreFellowship::import_member(signed(acc + 1), acc),
+				Error::<Test>::Unranked
+			);
+
+			assert_ok!(Club::add_member(RuntimeOrigin::root(), acc));
+			promote_n_times(acc, i);
+
+			hypothetically_ok!(CoreFellowship::import(signed(acc)));
+			hypothetically_ok!(CoreFellowship::import_member(signed(acc), acc));
+			// Works from other accounts
+			assert_ok!(CoreFellowship::import_member(signed(acc + 1), acc));
+
+			// Does not work again
+			assert_noop!(CoreFellowship::import(signed(acc)), Error::<Test>::AlreadyInducted);
+			assert_noop!(
+				CoreFellowship::import_member(signed(acc + 1), acc),
+				Error::<Test>::AlreadyInducted
+			);
+		}
+	});
+}
+
 #[test]
 fn swap_simple_works() {
 	new_test_ext().execute_with(|| {
@@ -178,7 +211,8 @@ fn swap_simple_works() {
 
 			assert_ok!(Club::add_member(RuntimeOrigin::root(), acc));
 			promote_n_times(acc, i);
-			assert_ok!(CoreFellowship::import(signed(acc)));
+			hypothetically_ok!(CoreFellowship::import(signed(acc)));
+			assert_ok!(CoreFellowship::import_member(signed(acc), acc));
 
 			// Swapping normally works:
 			assert_ok!(Club::exchange_member(RuntimeOrigin::root(), acc, acc + 10));
diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs
index 11d1ea9fe5b7563f16d6d9c7118071ec2b1a2e92..f4418ed439d0c00f715b40a281596461ce421e20 100644
--- a/substrate/frame/core-fellowship/src/tests/unit.rs
+++ b/substrate/frame/core-fellowship/src/tests/unit.rs
@@ -17,6 +17,8 @@
 
 //! The crate's tests.
 
+#![allow(deprecated)]
+
 use std::collections::BTreeMap;
 
 use core::cell::RefCell;
@@ -222,6 +224,66 @@ fn set_partial_params_works() {
 	});
 }
 
+#[test]
+fn import_member_works() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(CoreFellowship::import_member(signed(0), 0), Error::<Test>::Unranked);
+		assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::Unranked);
+
+		// Make induction work:
+		set_rank(0, 1);
+		assert!(!Member::<Test>::contains_key(0), "not yet imported");
+
+		// `import_member` can be used to induct ourselves:
+		hypothetically!({
+			assert_ok!(CoreFellowship::import_member(signed(0), 0));
+			assert!(Member::<Test>::contains_key(0), "got imported");
+
+			// Twice does not work:
+			assert_noop!(
+				CoreFellowship::import_member(signed(0), 0),
+				Error::<Test>::AlreadyInducted
+			);
+			assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::AlreadyInducted);
+		});
+
+		// But we could have also used `import`:
+		hypothetically!({
+			assert_ok!(CoreFellowship::import(signed(0)));
+			assert!(Member::<Test>::contains_key(0), "got imported");
+
+			// Twice does not work:
+			assert_noop!(
+				CoreFellowship::import_member(signed(0), 0),
+				Error::<Test>::AlreadyInducted
+			);
+			assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::AlreadyInducted);
+		});
+	});
+}
+
+#[test]
+fn import_member_same_as_import() {
+	new_test_ext().execute_with(|| {
+		for rank in 0..=9 {
+			set_rank(0, rank);
+
+			let import_root = hypothetically!({
+				assert_ok!(CoreFellowship::import(signed(0)));
+				sp_io::storage::root(sp_runtime::StateVersion::V1)
+			});
+
+			let import_member_root = hypothetically!({
+				assert_ok!(CoreFellowship::import_member(signed(1), 0));
+				sp_io::storage::root(sp_runtime::StateVersion::V1)
+			});
+
+			// `import` and `import_member` do exactly the same thing.
+			assert_eq!(import_root, import_member_root);
+		}
+	});
+}
+
 #[test]
 fn induct_works() {
 	new_test_ext().execute_with(|| {
diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs
index 9bca8cb56094f7bbd03ce979f12b0fec2e8526ac..e6381c854d344a53dceb903da9ac3b1ed6e95c57 100644
--- a/substrate/frame/core-fellowship/src/weights.rs
+++ b/substrate/frame/core-fellowship/src/weights.rs
@@ -61,6 +61,7 @@ pub trait WeightInfo {
 	fn promote_fast(r: u32, ) -> Weight;
 	fn offboard() -> Weight;
 	fn import() -> Weight;
+	fn import_member() -> Weight;
 	fn approve() -> Weight;
 	fn submit_evidence() -> Weight;
 }
@@ -76,7 +77,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `0`
 		// Minimum execution time: 6_652_000 picoseconds.
 		Weight::from_parts(7_082_000, 0)
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Params` (r:1 w:1)
 	/// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
@@ -86,8 +87,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `1853`
 		// Minimum execution time: 12_485_000 picoseconds.
 		Weight::from_parts(12_784_000, 1853)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -109,8 +110,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 61_243_000 picoseconds.
 		Weight::from_parts(63_033_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(6_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -132,8 +133,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 65_063_000 picoseconds.
 		Weight::from_parts(67_047_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(6_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -145,8 +146,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 21_924_000 picoseconds.
 		Weight::from_parts(22_691_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -164,8 +165,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 24_720_000 picoseconds.
 		Weight::from_parts(25_580_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().writes(5_u64))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:1)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -187,8 +188,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 58_481_000 picoseconds.
 		Weight::from_parts(59_510_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(5_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:1)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -211,10 +212,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		Weight::from_parts(42_220_685, 19894)
 			// Standard Error: 18_061
 			.saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
-			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
@@ -229,8 +230,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 17_492_000 picoseconds.
 		Weight::from_parts(18_324_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -242,8 +243,18 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 16_534_000 picoseconds.
 		Weight::from_parts(17_046_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(RocksDbWeight::get().reads(2))
+			.saturating_add(RocksDbWeight::get().writes(1))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -257,8 +268,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 42_264_000 picoseconds.
 		Weight::from_parts(43_281_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:0)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -270,8 +281,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 25_461_000 picoseconds.
 		Weight::from_parts(26_014_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 }
 
@@ -454,6 +465,16 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(RocksDbWeight::get().reads(2))
+			.saturating_add(RocksDbWeight::get().writes(1))
+	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
diff --git a/substrate/frame/democracy/README.md b/substrate/frame/democracy/README.md
index bbc5f1c65586ae7054e24df183ad12c6ac6357f5..d9d21e62447768bc7cb33e2caa2d02c77f71e9c8 100644
--- a/substrate/frame/democracy/README.md
+++ b/substrate/frame/democracy/README.md
@@ -96,7 +96,7 @@ This call can only be made by the `CancellationOrigin`.
 
 This call can only be made by the `ExternalOrigin`.
 
-- `external_propose` - Schedules a proposal to become a referendum once it is is legal
+- `external_propose` - Schedules a proposal to become a referendum once it is legal
   for an externally proposed referendum.
 
 #### External Majority Origin
diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs
index 27bc36a756e4b36f3d6f3afdc2eb351063f6df56..2c662fbad26a5a5973ca3c4986e0189d5ca8271f 100644
--- a/substrate/frame/democracy/src/lib.rs
+++ b/substrate/frame/democracy/src/lib.rs
@@ -113,7 +113,7 @@
 //!
 //! This call can only be made by the `ExternalOrigin`.
 //!
-//! - `external_propose` - Schedules a proposal to become a referendum once it is is legal for an
+//! - `external_propose` - Schedules a proposal to become a referendum once it is legal for an
 //!   externally proposed referendum.
 //!
 //! #### External Majority Origin
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
index 5009d3d54d56dc9009a5b8ae52bb00c614372208..7a48ae868a5a2e63d6b0467753cf5ca5f8b56f2c 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
@@ -26,7 +26,6 @@ sp-io = { workspace = true, default-features = true }
 sp-npos-elections = { workspace = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs
index effbb6e786c0d0b948c843656e72978bbc8eb3ec..fa1c48ee65edac9a7ca63132dfd917d7afa1b912 100644
--- a/substrate/frame/elections-phragmen/src/lib.rs
+++ b/substrate/frame/elections-phragmen/src/lib.rs
@@ -616,7 +616,7 @@ pub mod pallet {
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
 	pub enum Event<T: Config> {
 		/// A new term with new_members. This indicates that enough candidates existed to run
-		/// the election, not that enough have has been elected. The inner value must be examined
+		/// the election, not that enough have been elected. The inner value must be examined
 		/// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond
 		/// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to
 		/// begin with.
diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs
index 3841b010325b28c832ed197ac55c479c724c9afc..882d875f3d804901d09be10d9a4d186501c568fb 100644
--- a/substrate/frame/executive/src/tests.rs
+++ b/substrate/frame/executive/src/tests.rs
@@ -335,6 +335,9 @@ impl frame_system::ExtensionsWeightInfo for MockExtensionsWeights {
 	fn check_weight() -> Weight {
 		Weight::from_parts(10, 0)
 	}
+	fn weight_reclaim() -> Weight {
+		Weight::zero()
+	}
 }
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
@@ -452,6 +455,7 @@ type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic<
 	u64,
@@ -560,6 +564,7 @@ fn tx_ext(nonce: u64, fee: Balance) -> TxExtension {
 		frame_system::CheckNonce::from(nonce),
 		frame_system::CheckWeight::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::from(fee),
+		frame_system::WeightReclaim::new(),
 	)
 		.into()
 }
diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs
index 0a10e588277615fd73cd80a25b1167aa02faeae3..56048efa22cae6077e6d6f0e18fb772dff7777b4 100644
--- a/substrate/frame/grandpa/src/benchmarking.rs
+++ b/substrate/frame/grandpa/src/benchmarking.rs
@@ -17,7 +17,7 @@
 
 //! Benchmarks for the GRANDPA pallet.
 
-use super::{Pallet as Grandpa, *};
+use super::*;
 use frame_benchmarking::v2::*;
 use frame_system::RawOrigin;
 use sp_core::H256;
@@ -69,7 +69,7 @@ mod benchmarks {
 		#[extrinsic_call]
 		_(RawOrigin::Root, delay, best_finalized_block_number);
 
-		assert!(Grandpa::<T>::stalled().is_some());
+		assert!(Stalled::<T>::get().is_some());
 	}
 
 	impl_benchmark_test_suite!(
diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs
index 2366c957e9ab17e335ed5c830af64134ad39f033..4ebdbc1eecd300fb98a3dda0ac4035505c3454c9 100644
--- a/substrate/frame/grandpa/src/equivocation.rs
+++ b/substrate/frame/grandpa/src/equivocation.rs
@@ -177,7 +177,7 @@ where
 		evidence: (EquivocationProof<T::Hash, BlockNumberFor<T>>, T::KeyOwnerProof),
 	) -> Result<(), DispatchError> {
 		let (equivocation_proof, key_owner_proof) = evidence;
-		let reporter = reporter.or_else(|| <pallet_authorship::Pallet<T>>::author());
+		let reporter = reporter.or_else(|| pallet_authorship::Pallet::<T>::author());
 		let offender = equivocation_proof.offender().clone();
 
 		// We check the equivocation within the context of its set id (and
diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs
index 4f69aeaef523671969e961b24b4b87b59115bae3..9017eec2ca8f8139a036ed0b59e61f3fb8277a9f 100644
--- a/substrate/frame/grandpa/src/lib.rs
+++ b/substrate/frame/grandpa/src/lib.rs
@@ -127,7 +127,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn on_finalize(block_number: BlockNumberFor<T>) {
 			// check for scheduled pending authority set changes
-			if let Some(pending_change) = <PendingChange<T>>::get() {
+			if let Some(pending_change) = PendingChange::<T>::get() {
 				// emit signal if we're at the block that scheduled the change
 				if block_number == pending_change.scheduled_at {
 					let next_authorities = pending_change.next_authorities.to_vec();
@@ -150,12 +150,12 @@ pub mod pallet {
 					Self::deposit_event(Event::NewAuthorities {
 						authority_set: pending_change.next_authorities.into_inner(),
 					});
-					<PendingChange<T>>::kill();
+					PendingChange::<T>::kill();
 				}
 			}
 
 			// check for scheduled pending state changes
-			match <State<T>>::get() {
+			match State::<T>::get() {
 				StoredState::PendingPause { scheduled_at, delay } => {
 					// signal change to pause
 					if block_number == scheduled_at {
@@ -164,7 +164,7 @@ pub mod pallet {
 
 					// enact change to paused state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Paused);
+						State::<T>::put(StoredState::Paused);
 						Self::deposit_event(Event::Paused);
 					}
 				},
@@ -176,7 +176,7 @@ pub mod pallet {
 
 					// enact change to live state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Live);
+						State::<T>::put(StoredState::Live);
 						Self::deposit_event(Event::Resumed);
 					}
 				},
@@ -297,37 +297,32 @@ pub mod pallet {
 	}
 
 	#[pallet::type_value]
-	pub(super) fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
+	pub fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
 		StoredState::Live
 	}
 
 	/// State of the current authority set.
 	#[pallet::storage]
-	#[pallet::getter(fn state)]
-	pub(super) type State<T: Config> =
+	pub type State<T: Config> =
 		StorageValue<_, StoredState<BlockNumberFor<T>>, ValueQuery, DefaultForState<T>>;
 
 	/// Pending change: (signaled at, scheduled change).
 	#[pallet::storage]
-	#[pallet::getter(fn pending_change)]
-	pub(super) type PendingChange<T: Config> =
+	pub type PendingChange<T: Config> =
 		StorageValue<_, StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>>;
 
 	/// next block number where we can force a change.
 	#[pallet::storage]
-	#[pallet::getter(fn next_forced)]
-	pub(super) type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;
+	pub type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;
 
 	/// `true` if we are currently stalled.
 	#[pallet::storage]
-	#[pallet::getter(fn stalled)]
-	pub(super) type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;
+	pub type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;
 
 	/// The number of changes (both in terms of keys and underlying economic responsibilities)
 	/// in the "set" of Grandpa validators from genesis.
 	#[pallet::storage]
-	#[pallet::getter(fn current_set_id)]
-	pub(super) type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;
+	pub type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;
 
 	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
 	/// members were responsible.
@@ -340,12 +335,11 @@ pub mod pallet {
 	///
 	/// TWOX-NOTE: `SetId` is not under user control.
 	#[pallet::storage]
-	#[pallet::getter(fn session_for_set)]
-	pub(super) type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;
+	pub type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;
 
 	/// The current list of authorities.
 	#[pallet::storage]
-	pub(crate) type Authorities<T: Config> =
+	pub type Authorities<T: Config> =
 		StorageValue<_, BoundedAuthorityList<T::MaxAuthorities>, ValueQuery>;
 
 	#[derive(frame_support::DefaultNoBound)]
@@ -432,6 +426,44 @@ pub enum StoredState<N> {
 }
 
 impl<T: Config> Pallet<T> {
+	/// State of the current authority set.
+	pub fn state() -> StoredState<BlockNumberFor<T>> {
+		State::<T>::get()
+	}
+
+	/// Pending change: (signaled at, scheduled change).
+	pub fn pending_change() -> Option<StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>> {
+		PendingChange::<T>::get()
+	}
+
+	/// next block number where we can force a change.
+	pub fn next_forced() -> Option<BlockNumberFor<T>> {
+		NextForced::<T>::get()
+	}
+
+	/// `true` if we are currently stalled.
+	pub fn stalled() -> Option<(BlockNumberFor<T>, BlockNumberFor<T>)> {
+		Stalled::<T>::get()
+	}
+
+	/// The number of changes (both in terms of keys and underlying economic responsibilities)
+	/// in the "set" of Grandpa validators from genesis.
+	pub fn current_set_id() -> SetId {
+		CurrentSetId::<T>::get()
+	}
+
+	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
+	/// members were responsible.
+	///
+	/// This is only used for validating equivocation proofs. An equivocation proof must
+	/// contain a key-ownership proof for a given session, therefore we need a way to tie
+	/// together sessions and GRANDPA set ids, i.e. we need to validate that a validator
+	/// was the owner of a given key on a given session, and what the active set ID was
+	/// during that session.
+	pub fn session_for_set(set_id: SetId) -> Option<SessionIndex> {
+		SetIdSession::<T>::get(set_id)
+	}
+
 	/// Get the current set of authorities, along with their respective weights.
 	pub fn grandpa_authorities() -> AuthorityList {
 		Authorities::<T>::get().into_inner()
@@ -440,9 +472,9 @@ impl<T: Config> Pallet<T> {
 	/// Schedule GRANDPA to pause starting in the given number of blocks.
 	/// Cannot be done when already paused.
 	pub fn schedule_pause(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Live = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });
+		if let StoredState::Live = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });
 
 			Ok(())
 		} else {
@@ -452,9 +484,9 @@ impl<T: Config> Pallet<T> {
 
 	/// Schedule a resume of GRANDPA after pausing.
 	pub fn schedule_resume(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Paused = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });
+		if let StoredState::Paused = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });
 
 			Ok(())
 		} else {
@@ -481,17 +513,17 @@ impl<T: Config> Pallet<T> {
 		in_blocks: BlockNumberFor<T>,
 		forced: Option<BlockNumberFor<T>>,
 	) -> DispatchResult {
-		if !<PendingChange<T>>::exists() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
+		if !PendingChange::<T>::exists() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
 
 			if forced.is_some() {
-				if Self::next_forced().map_or(false, |next| next > scheduled_at) {
+				if NextForced::<T>::get().map_or(false, |next| next > scheduled_at) {
 					return Err(Error::<T>::TooSoon.into())
 				}
 
 				// only allow the next forced change when twice the window has passed since
 				// this one.
-				<NextForced<T>>::put(scheduled_at + in_blocks * 2u32.into());
+				NextForced::<T>::put(scheduled_at + in_blocks * 2u32.into());
 			}
 
 			let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from(
@@ -502,7 +534,7 @@ impl<T: Config> Pallet<T> {
 				),
 			);
 
-			<PendingChange<T>>::put(StoredPendingChange {
+			PendingChange::<T>::put(StoredPendingChange {
 				delay: in_blocks,
 				scheduled_at,
 				next_authorities,
@@ -518,7 +550,7 @@ impl<T: Config> Pallet<T> {
 	/// Deposit one of this module's logs.
 	fn deposit_log(log: ConsensusLog<BlockNumberFor<T>>) {
 		let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode());
-		<frame_system::Pallet<T>>::deposit_log(log);
+		frame_system::Pallet::<T>::deposit_log(log);
 	}
 
 	// Perform module initialization, abstracted so that it can be called either through genesis
@@ -554,7 +586,7 @@ impl<T: Config> Pallet<T> {
 		// when we record old authority sets we could try to figure out _who_
 		// failed. until then, we can't meaningfully guard against
 		// `next == last` the way that normal session changes do.
-		<Stalled<T>>::put((further_wait, median));
+		Stalled::<T>::put((further_wait, median));
 	}
 }
 
@@ -583,10 +615,10 @@ where
 		// Always issue a change if `session` says that the validators have changed.
 		// Even if their session keys are the same as before, the underlying economic
 		// identities have changed.
-		let current_set_id = if changed || <Stalled<T>>::exists() {
+		let current_set_id = if changed || Stalled::<T>::exists() {
 			let next_authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();
 
-			let res = if let Some((further_wait, median)) = <Stalled<T>>::take() {
+			let res = if let Some((further_wait, median)) = Stalled::<T>::take() {
 				Self::schedule_change(next_authorities, further_wait, Some(median))
 			} else {
 				Self::schedule_change(next_authorities, Zero::zero(), None)
@@ -608,17 +640,17 @@ where
 				// either the session module signalled that the validators have changed
 				// or the set was stalled. but since we didn't successfully schedule
 				// an authority set change we do not increment the set id.
-				Self::current_set_id()
+				CurrentSetId::<T>::get()
 			}
 		} else {
 			// nothing's changed, neither economic conditions nor session keys. update the pointer
 			// of the current set.
-			Self::current_set_id()
+			CurrentSetId::<T>::get()
 		};
 
 		// update the mapping to note that the current set corresponds to the
 		// latest equivalent session (i.e. now).
-		let session_index = <pallet_session::Pallet<T>>::current_index();
+		let session_index = pallet_session::Pallet::<T>::current_index();
 		SetIdSession::<T>::insert(current_set_id, &session_index);
 	}
 
diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs
index 383f77f00de71d29cf6c7201f44ea628af2ce7fd..f4720966b17974ed2d10456310dc84c085d1757a 100644
--- a/substrate/frame/grandpa/src/tests.rs
+++ b/substrate/frame/grandpa/src/tests.rs
@@ -110,7 +110,7 @@ fn cannot_schedule_change_when_one_pending() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap();
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -120,7 +120,7 @@ fn cannot_schedule_change_when_one_pending() {
 		let header = System::finalize();
 
 		initialize_block(2, header.hash());
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -130,7 +130,7 @@ fn cannot_schedule_change_when_one_pending() {
 		let header = System::finalize();
 
 		initialize_block(3, header.hash());
-		assert!(!<PendingChange<Test>>::exists());
+		assert!(!PendingChange::<Test>::exists());
 		assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None));
 
 		Grandpa::on_finalize(3);
@@ -144,7 +144,7 @@ fn dispatch_forced_change() {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap();
 
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)),
 			Error::<Test>::ChangePending
@@ -155,8 +155,8 @@ fn dispatch_forced_change() {
 
 		for i in 2..7 {
 			initialize_block(i, header.hash());
-			assert!(<PendingChange<Test>>::get().unwrap().forced.is_some());
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 				Error::<Test>::ChangePending
@@ -174,7 +174,7 @@ fn dispatch_forced_change() {
 		// add a normal change.
 		{
 			initialize_block(7, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -187,7 +187,7 @@ fn dispatch_forced_change() {
 		// run the normal change.
 		{
 			initialize_block(8, header.hash());
-			assert!(<PendingChange<Test>>::exists());
+			assert!(PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -204,9 +204,9 @@ fn dispatch_forced_change() {
 		// time.
 		for i in 9..11 {
 			initialize_block(i, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)]));
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)),
 				Error::<Test>::TooSoon
@@ -217,13 +217,13 @@ fn dispatch_forced_change() {
 
 		{
 			initialize_block(11, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_ok!(Grandpa::schedule_change(
 				to_authorities(vec![(5, 1), (6, 1), (7, 1)]),
 				5,
 				Some(0)
 			));
-			assert_eq!(Grandpa::next_forced(), Some(21));
+			assert_eq!(NextForced::<Test>::get(), Some(21));
 			Grandpa::on_finalize(11);
 			header = System::finalize();
 		}
@@ -239,7 +239,10 @@ fn schedule_pause_only_when_live() {
 		Grandpa::schedule_pause(1).unwrap();
 
 		// we've switched to the pending pause state
-		assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 });
+		assert_eq!(
+			State::<Test>::get(),
+			StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }
+		);
 
 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
@@ -253,7 +256,7 @@ fn schedule_pause_only_when_live() {
 		let _ = System::finalize();
 
 		// after finalizing block 2 the set should have switched to paused state
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);
 	});
 }
 
@@ -265,14 +268,14 @@ fn schedule_resume_only_when_paused() {
 		// the set is currently live, resuming it is an error
 		assert_noop!(Grandpa::schedule_resume(1), Error::<Test>::ResumeFailed);
 
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);
 
 		// we schedule a pause to be applied instantly
 		Grandpa::schedule_pause(0).unwrap();
 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
 
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);
 
 		// we schedule the set to go back live in 2 blocks
 		initialize_block(2, Default::default());
@@ -289,7 +292,7 @@ fn schedule_resume_only_when_paused() {
 		let _ = System::finalize();
 
 		// it should be live at block 4
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);
 	});
 }
 
@@ -342,7 +345,7 @@ fn report_equivocation_current_set_works() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof, with two votes in the same round for
 		// different block hashes signed by the same key
@@ -424,7 +427,7 @@ fn report_equivocation_old_set_works() {
 
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof for the old set,
 		let equivocation_proof = generate_equivocation_proof(
@@ -487,7 +490,7 @@ fn report_equivocation_invalid_set_id() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation for a future set
 		let equivocation_proof = generate_equivocation_proof(
@@ -527,7 +530,7 @@ fn report_equivocation_invalid_session() {
 
 		start_era(2);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof at set id = 2
 		let equivocation_proof = generate_equivocation_proof(
@@ -568,7 +571,7 @@ fn report_equivocation_invalid_key_owner_proof() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof for the authority at index 0
 		let equivocation_proof = generate_equivocation_proof(
@@ -611,7 +614,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		let assert_invalid_equivocation_proof = |equivocation_proof| {
 			assert_err!(
@@ -675,7 +678,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 		let equivocation_authority_index = 0;
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		let equivocation_proof = generate_equivocation_proof(
 			set_id,
@@ -748,12 +751,12 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 #[test]
 fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
-		assert_eq!(Grandpa::current_set_id(), 0);
+		assert_eq!(CurrentSetId::<Test>::get(), 0);
 
 		// starting a new era should lead to a change in the session
 		// validators and trigger a new set
 		start_era(1);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// we schedule a change delayed by 2 blocks, this should make it so that
 		// when we try to rotate the session at the beginning of the era we will
@@ -761,22 +764,22 @@ fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
 		// not increment the set id.
 		Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap();
 		start_era(2);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// everything should go back to normal after.
 		start_era(3);
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 
 		// session rotation might also fail to schedule a change if it's for a
 		// forced change (i.e. grandpa is stalled) and it is too soon.
-		<NextForced<Test>>::put(1000);
-		<Stalled<Test>>::put((30, 1));
+		NextForced::<Test>::put(1000);
+		Stalled::<Test>::put((30, 1));
 
 		// NOTE: we cannot go through normal era rotation since having `Stalled`
 		// defined will also trigger a new set (regardless of whether the
 		// session validators changed)
 		Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }
 
@@ -790,19 +793,19 @@ fn cleans_up_old_set_id_session_mappings() {
 		// we should have a session id mapping for all the set ids from
 		// `max_set_id_session_entries` eras we have observed
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}
 
 		start_era(max_set_id_session_entries * 2);
 
 		// we should keep tracking the new mappings for new eras
 		for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}
 
 		// but the old ones should have been pruned by now
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_none());
+			assert!(SetIdSession::<Test>::get(i as u64).is_none());
 		}
 	});
 }
@@ -812,24 +815,24 @@ fn always_schedules_a_change_on_new_session_when_stalled() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		start_era(1);
 
-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// if the session handler reports no change then we should not schedule
 		// any pending change
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
 
-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// if grandpa is stalled then we should **always** schedule a forced
 		// change on a new session
-		<Stalled<Test>>::put((10, 1));
+		Stalled::<Test>::put((10, 1));
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
 
-		assert!(Grandpa::pending_change().is_some());
-		assert!(Grandpa::pending_change().unwrap().forced.is_some());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert!(PendingChange::<Test>::get().is_some());
+		assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }
 
@@ -861,7 +864,7 @@ fn valid_equivocation_reports_dont_pay_fees() {
 
 		let equivocation_key = &Grandpa::grandpa_authorities()[0].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof.
 		let equivocation_proof = generate_equivocation_proof(
diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs
index 11a3345ee15ce2b7217f6070704f6bfbdabf78b9..7a6966f4629027dfe25cc65a866b415a3d9d45ea 100644
--- a/substrate/frame/metadata-hash-extension/src/tests.rs
+++ b/substrate/frame/metadata-hash-extension/src/tests.rs
@@ -144,6 +144,7 @@ mod docs {
 			// Add the `CheckMetadataHash` extension.
 			// The position in this list is not important, so we could also add it to beginning.
 			frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+			frame_system::WeightReclaim<Runtime>,
 		);
 
 		/// In your runtime this will be your real address type.
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index bb5e8486456658bf27835d56d5663e5cce0855a9..0ae3b3938c608b6ad10a0b6a06f256b463c50564 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -17,42 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive", "max-encoded-len"], workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["derive"], workspace = true }
 sp-application-crypto = { workspace = true }
-sp-arithmetic = { workspace = true }
-sp-io = { workspace = true }
 sp-mixnet = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"scale-info/std",
 	"serde/std",
 	"sp-application-crypto/std",
-	"sp-arithmetic/std",
-	"sp-io/std",
 	"sp-mixnet/std",
-	"sp-runtime/std",
-]
-runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs
index 6579ed678ae7a6a10200c80284a749acdb4bfbae..9849818176760cf9dfadb0a72d94b8c0206cc758 100644
--- a/substrate/frame/mixnet/src/lib.rs
+++ b/substrate/frame/mixnet/src/lib.rs
@@ -23,28 +23,23 @@
 
 extern crate alloc;
 
+pub use pallet::*;
+
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
 use core::cmp::Ordering;
-use frame_support::{
-	traits::{EstimateNextSessionRotation, Get, OneSessionHandler},
-	BoundedVec,
+use frame::{
+	deps::{
+		sp_io::{self, MultiRemovalResults},
+		sp_runtime,
+	},
+	prelude::*,
 };
-use frame_system::{
-	offchain::{CreateInherent, SubmitTransaction},
-	pallet_prelude::BlockNumberFor,
-};
-pub use pallet::*;
-use scale_info::TypeInfo;
 use serde::{Deserialize, Serialize};
 use sp_application_crypto::RuntimeAppPublic;
-use sp_arithmetic::traits::{CheckedSub, Saturating, UniqueSaturatedInto, Zero};
-use sp_io::MultiRemovalResults;
 use sp_mixnet::types::{
 	AuthorityId, AuthoritySignature, KxPublic, Mixnode, MixnodesErr, PeerId, SessionIndex,
 	SessionPhase, SessionStatus, KX_PUBLIC_SIZE,
 };
-use sp_runtime::RuntimeDebug;
 
 const LOG_TARGET: &str = "runtime::mixnet";
 
@@ -168,12 +163,9 @@ fn twox<BlockNumber: UniqueSaturatedInto<u64>>(
 // The pallet
 ////////////////////////////////////////////////////////////////////////////////
 
-#[frame_support::pallet(dev_mode)]
+#[frame::pallet(dev_mode)]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
-	use frame_system::pallet_prelude::*;
-
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
 
@@ -254,7 +246,7 @@ pub mod pallet {
 		StorageDoubleMap<_, Identity, SessionIndex, Identity, AuthorityIndex, BoundedMixnodeFor<T>>;
 
 	#[pallet::genesis_config]
-	#[derive(frame_support::DefaultNoBound)]
+	#[derive(DefaultNoBound)]
 	pub struct GenesisConfig<T: Config> {
 		/// The mixnode set for the very first session.
 		pub mixnodes: BoundedVec<BoundedMixnodeFor<T>, T::MaxAuthorities>,
@@ -308,7 +300,7 @@ pub mod pallet {
 
 		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
 			let Self::Call::register { registration, signature } = call else {
-				return InvalidTransaction::Call.into()
+				return InvalidTransaction::Call.into();
 			};
 
 			// Check session index matches
@@ -320,16 +312,16 @@ pub mod pallet {
 
 			// Check authority index is valid
 			if registration.authority_index >= T::MaxAuthorities::get() {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			}
 			let Some(authority_id) = NextAuthorityIds::<T>::get(registration.authority_index)
 			else {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			};
 
 			// Check the authority hasn't registered a mixnode yet
 			if Self::already_registered(registration.session_index, registration.authority_index) {
-				return InvalidTransaction::Stale.into()
+				return InvalidTransaction::Stale.into();
 			}
 
 			// Check signature. Note that we don't use regular signed transactions for registration
@@ -339,7 +331,7 @@ pub mod pallet {
 				authority_id.verify(&encoded_registration, signature)
 			});
 			if !signature_ok {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			}
 
 			ValidTransaction::with_tag_prefix("MixnetRegistration")
@@ -368,12 +360,12 @@ impl<T: Config> Pallet<T> {
 			.saturating_sub(CurrentSessionStartBlock::<T>::get());
 		let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumCoverToCurrentBlocks::get())
 		else {
-			return SessionPhase::CoverToCurrent
+			return SessionPhase::CoverToCurrent;
 		};
 		let Some(block_in_phase) =
 			block_in_phase.checked_sub(&T::NumRequestsToCurrentBlocks::get())
 		else {
-			return SessionPhase::RequestsToCurrent
+			return SessionPhase::RequestsToCurrent;
 		};
 		if block_in_phase < T::NumCoverToPrevBlocks::get() {
 			SessionPhase::CoverToPrev
@@ -411,7 +403,7 @@ impl<T: Config> Pallet<T> {
 			return Err(MixnodesErr::InsufficientRegistrations {
 				num: 0,
 				min: T::MinMixnodes::get(),
-			})
+			});
 		};
 		Self::mixnodes(prev_session_index)
 	}
@@ -430,7 +422,7 @@ impl<T: Config> Pallet<T> {
 		// registering
 		let block_in_session = block_number.saturating_sub(CurrentSessionStartBlock::<T>::get());
 		if block_in_session < T::NumRegisterStartSlackBlocks::get() {
-			return false
+			return false;
 		}
 
 		let (Some(end_block), _weight) =
@@ -438,7 +430,7 @@ impl<T: Config> Pallet<T> {
 		else {
 			// Things aren't going to work terribly well in this case as all the authorities will
 			// just pile in after the slack period...
-			return true
+			return true;
 		};
 
 		let remaining_blocks = end_block
@@ -447,7 +439,7 @@ impl<T: Config> Pallet<T> {
 		if remaining_blocks.is_zero() {
 			// Into the slack time at the end of the session. Not necessarily too late;
 			// registrations are accepted right up until the session ends.
-			return true
+			return true;
 		}
 
 		// Want uniform distribution over the remaining blocks, so pick this block with probability
@@ -496,7 +488,7 @@ impl<T: Config> Pallet<T> {
 				"Session {session_index} registration attempted, \
 				but current session is {current_session_index}",
 			);
-			return false
+			return false;
 		}
 
 		let block_number = frame_system::Pallet::<T>::block_number();
@@ -505,7 +497,7 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Waiting for the session to progress further before registering",
 			);
-			return false
+			return false;
 		}
 
 		let Some((authority_index, authority_id)) = Self::next_local_authority() else {
@@ -513,7 +505,7 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Not an authority in the next session; cannot register a mixnode",
 			);
-			return false
+			return false;
 		};
 
 		if Self::already_registered(session_index, authority_index) {
@@ -521,14 +513,14 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Already registered a mixnode for the next session",
 			);
-			return false
+			return false;
 		}
 
 		let registration =
 			Registration { block_number, session_index, authority_index, mixnode: mixnode.into() };
 		let Some(signature) = authority_id.sign(&registration.encode()) else {
 			log::debug!(target: LOG_TARGET, "Failed to sign registration");
-			return false
+			return false;
 		};
 		let call = Call::register { registration, signature };
 		let xt = T::create_inherent(call.into());
diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml
index 7f6df86ed0e5779d56c48c1fa334ede9e2a12b6c..23537b2278933db7cbacaa9f490848738a5d0224 100644
--- a/substrate/frame/nft-fractionalization/Cargo.toml
+++ b/substrate/frame/nft-fractionalization/Cargo.toml
@@ -30,7 +30,6 @@ sp-runtime = { workspace = true }
 pallet-balances = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml
index 17473649393411047ec3e2b56d0306e5c228a3ca..7e55ad178091ffc42d858c80d4729a6f6d86d362 100644
--- a/substrate/frame/node-authorization/Cargo.toml
+++ b/substrate/frame/node-authorization/Cargo.toml
@@ -16,28 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs
index 7682b54ea0f242b9c09a72bd6f134aa584598350..3cec0d3bcb63d7ea6944e923a64163f3c1831ae0 100644
--- a/substrate/frame/node-authorization/src/lib.rs
+++ b/substrate/frame/node-authorization/src/lib.rs
@@ -47,18 +47,18 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::{collections::btree_set::BTreeSet, vec::Vec};
+use frame::{
+	deps::{sp_core::OpaquePeerId as PeerId, sp_io},
+	prelude::*,
+};
 pub use pallet::*;
-use sp_core::OpaquePeerId as PeerId;
-use sp_runtime::traits::StaticLookup;
 pub use weights::WeightInfo;
 
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
-	use frame_system::pallet_prelude::*;
 
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
@@ -111,7 +111,7 @@ pub mod pallet {
 		StorageMap<_, Blake2_128Concat, PeerId, BTreeSet<PeerId>, ValueQuery>;
 
 	#[pallet::genesis_config]
-	#[derive(frame_support::DefaultNoBound)]
+	#[derive(DefaultNoBound)]
 	pub struct GenesisConfig<T: Config> {
 		pub nodes: Vec<(PeerId, T::AccountId)>,
 	}
@@ -171,7 +171,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		/// Set reserved node every block. It may not be enabled depends on the offchain
 		/// worker settings when starting the node.
-		fn offchain_worker(now: frame_system::pallet_prelude::BlockNumberFor<T>) {
+		fn offchain_worker(now: BlockNumberFor<T>) {
 			let network_state = sp_io::offchain::network_state();
 			match network_state {
 				Err(_) => log::error!(
diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs
index 656d2bfa39ad3d4b2ac22c135691c66dd9079b4a..c6665a479e1147b8ce2a51232d705d0865f360d9 100644
--- a/substrate/frame/node-authorization/src/mock.rs
+++ b/substrate/frame/node-authorization/src/mock.rs
@@ -20,13 +20,11 @@
 use super::*;
 use crate as pallet_node_authorization;
 
-use frame_support::{derive_impl, ord_parameter_types, traits::ConstU32};
-use frame_system::EnsureSignedBy;
-use sp_runtime::BuildStorage;
+use frame::testing_prelude::*;
 
 type Block = frame_system::mocking::MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -61,7 +59,7 @@ pub fn test_node(id: u8) -> PeerId {
 	PeerId(vec![id])
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_node_authorization::GenesisConfig::<Test> {
 		nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)],
diff --git a/substrate/frame/node-authorization/src/tests.rs b/substrate/frame/node-authorization/src/tests.rs
index 4704b5adf2690f842480eb86356ee9c77437b010..cf60ab6efbd88bbea8060390cb7f95413b23bc9b 100644
--- a/substrate/frame/node-authorization/src/tests.rs
+++ b/substrate/frame/node-authorization/src/tests.rs
@@ -19,8 +19,7 @@
 
 use super::*;
 use crate::mock::*;
-use frame_support::{assert_noop, assert_ok};
-use sp_runtime::traits::BadOrigin;
+use frame::testing_prelude::*;
 
 #[test]
 fn add_well_known_node_works() {
diff --git a/substrate/frame/node-authorization/src/weights.rs b/substrate/frame/node-authorization/src/weights.rs
index 881eeaf7a4c090573eab463494fd9a6846ae4b40..cd2935458b9daeefa6df80c2ab4d394b97ff431f 100644
--- a/substrate/frame/node-authorization/src/weights.rs
+++ b/substrate/frame/node-authorization/src/weights.rs
@@ -21,8 +21,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
-use core::marker::PhantomData;
+use frame::weights_prelude::*;
 
 pub trait WeightInfo {
 	fn add_well_known_node() -> Weight;
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
index fe3743d7e5dac443a801ac88b8c2605c7235ac2e..62c2fb625fc4f3e1c4a7252efa7750ec263dd0e8 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
@@ -23,7 +23,6 @@ sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 frame-support = { features = ["experimental"], workspace = true, default-features = true }
diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
index 2cdc4c41a0836223c2231e2428947f560f8a1d45..0b21d5f4e8cf1e3a98e3220a17bfe7eeb0b44cc0 100644
--- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
@@ -23,7 +23,6 @@ sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 frame-support = { workspace = true, default-features = true }
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs
index 658e7fec534827a9f82f0756840303d20a725233..849ffddf4fb3c0c4e88fbf534a33312d0e2a83b9 100644
--- a/substrate/frame/preimage/src/lib.rs
+++ b/substrate/frame/preimage/src/lib.rs
@@ -236,7 +236,7 @@ pub mod pallet {
 			Self::do_unrequest_preimage(&hash)
 		}
 
-		/// Ensure that the a bulk of pre-images is upgraded.
+		/// Ensure that the bulk of pre-images is upgraded.
 		///
 		/// The caller pays no fee if at least 90% of pre-images were successfully updated.
 		#[pallet::call_index(4)]
diff --git a/substrate/frame/recovery/README.md b/substrate/frame/recovery/README.md
index 7e2dd7a23619ac893061d0730f7813bdc7e33e98..39f6914070464df82aaea3d69b975319a0670a33 100644
--- a/substrate/frame/recovery/README.md
+++ b/substrate/frame/recovery/README.md
@@ -62,12 +62,12 @@ The intended life cycle of a successful recovery takes the following steps:
 
 ### Malicious Recovery Attempts
 
-Initializing a the recovery process for a recoverable account is open and
+Initializing the recovery process for a recoverable account is open and
 permissionless. However, the recovery deposit is an economic deterrent that
 should disincentivize would-be attackers from trying to maliciously recover
 accounts.
 
-The recovery deposit can always be claimed by the account which is trying to
+The recovery deposit can always be claimed by the account which is trying
 to be recovered. In the case of a malicious recovery attempt, the account
 owner who still has access to their account can claim the deposit and
 essentially punish the malicious user.
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index f8622880538efceb7b377e64b9dfe26b994bd7fb..42fb641983f6308a9809ca8ec856ee2d019ab569 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -75,12 +75,12 @@
 //!
 //! ### Malicious Recovery Attempts
 //!
-//! Initializing a the recovery process for a recoverable account is open and
+//! Initializing the recovery process for a recoverable account is open and
 //! permissionless. However, the recovery deposit is an economic deterrent that
 //! should disincentivize would-be attackers from trying to maliciously recover
 //! accounts.
 //!
-//! The recovery deposit can always be claimed by the account which is trying to
+//! The recovery deposit can always be claimed by the account which is trying
 //! to be recovered. In the case of a malicious recovery attempt, the account
 //! owner who still has access to their account can claim the deposit and
 //! essentially punish the malicious user.
@@ -156,7 +156,10 @@ use alloc::{boxed::Box, vec::Vec};
 use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_runtime::{
-	traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup},
+	traits::{
+		BlockNumberProvider, CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion,
+		StaticLookup,
+	},
 	RuntimeDebug,
 };
 
@@ -178,11 +181,12 @@ mod mock;
 mod tests;
 pub mod weights;
 
+type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-
+type BlockNumberFromProviderOf<T> =
+	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
 type FriendsOf<T> = BoundedVec<<T as frame_system::Config>::AccountId, <T as Config>::MaxFriends>;
-type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
 /// An active recovery process.
 #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)]
@@ -190,7 +194,7 @@ pub struct ActiveRecovery<BlockNumber, Balance, Friends> {
 	/// The block number when the recovery process started.
 	created: BlockNumber,
 	/// The amount held in reserve of the `depositor`,
-	/// To be returned once this recovery process is closed.
+	/// to be returned once this recovery process is closed.
 	deposit: Balance,
 	/// The friends which have vouched so far. Always sorted.
 	friends: Friends,
@@ -236,6 +240,9 @@ pub mod pallet {
 			+ GetDispatchInfo
 			+ From<frame_system::Call<Self>>;
 
+		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		type BlockNumberProvider: BlockNumberProvider;
+
 		/// The currency mechanism.
 		type Currency: ReservableCurrency<Self::AccountId>;
 
@@ -339,7 +346,7 @@ pub mod pallet {
 		_,
 		Twox64Concat,
 		T::AccountId,
-		RecoveryConfig<BlockNumberFor<T>, BalanceOf<T>, FriendsOf<T>>,
+		RecoveryConfig<BlockNumberFromProviderOf<T>, BalanceOf<T>, FriendsOf<T>>,
 	>;
 
 	/// Active recovery attempts.
@@ -354,7 +361,7 @@ pub mod pallet {
 		T::AccountId,
 		Twox64Concat,
 		T::AccountId,
-		ActiveRecovery<BlockNumberFor<T>, BalanceOf<T>, FriendsOf<T>>,
+		ActiveRecovery<BlockNumberFromProviderOf<T>, BalanceOf<T>, FriendsOf<T>>,
 	>;
 
 	/// The list of allowed proxy accounts.
@@ -396,7 +403,7 @@ pub mod pallet {
 				.map_err(|e| e.error)
 		}
 
-		/// Allow ROOT to bypass the recovery process and set an a rescuer account
+		/// Allow ROOT to bypass the recovery process and set a rescuer account
 		/// for a lost account directly.
 		///
 		/// The dispatch origin for this call must be _ROOT_.
@@ -445,7 +452,7 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			friends: Vec<T::AccountId>,
 			threshold: u16,
-			delay_period: BlockNumberFor<T>,
+			delay_period: BlockNumberFromProviderOf<T>,
 		) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 			// Check account is not already set up for recovery
@@ -511,7 +518,7 @@ pub mod pallet {
 			T::Currency::reserve(&who, recovery_deposit)?;
 			// Create an active recovery status
 			let recovery_status = ActiveRecovery {
-				created: <frame_system::Pallet<T>>::block_number(),
+				created: T::BlockNumberProvider::current_block_number(),
 				deposit: recovery_deposit,
 				friends: Default::default(),
 			};
@@ -596,7 +603,7 @@ pub mod pallet {
 				Self::active_recovery(&account, &who).ok_or(Error::<T>::NotStarted)?;
 			ensure!(!Proxy::<T>::contains_key(&who), Error::<T>::AlreadyProxy);
 			// Make sure the delay period has passed
-			let current_block_number = <frame_system::Pallet<T>>::block_number();
+			let current_block_number = T::BlockNumberProvider::current_block_number();
 			let recoverable_block_number = active_recovery
 				.created
 				.checked_add(&recovery_config.delay_period)
diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs
index 8e30cbe997e17b80e8ef74a598b15343e6affdef..3930db82d6c77ad0133e0726e904c0ac0efb4bc7 100644
--- a/substrate/frame/recovery/src/mock.rs
+++ b/substrate/frame/recovery/src/mock.rs
@@ -66,6 +66,7 @@ impl Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type WeightInfo = ();
 	type RuntimeCall = RuntimeCall;
+	type BlockNumberProvider = System;
 	type Currency = Balances;
 	type ConfigDepositBase = ConfigDepositBase;
 	type FriendDepositFactor = FriendDepositFactor;
diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml
index fa008f8e836a1c9eb7a4a09a9e96c92d4e829f26..1284f5ee8947b6ff8d0d6990ce45ba8710ab2711 100644
--- a/substrate/frame/revive/Cargo.toml
+++ b/substrate/frame/revive/Cargo.toml
@@ -46,7 +46,6 @@ sp-arithmetic = { workspace = true }
 sp-core = { workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { workspace = true }
-sp-std = { workspace = true }
 subxt-signer = { workspace = true, optional = true, features = [
 	"unstable-eth",
 ] }
@@ -99,7 +98,6 @@ std = [
 	"sp-io/std",
 	"sp-keystore/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"subxt-signer",
 	"xcm-builder/std",
 	"xcm/std",
diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs
index b6ea1a06d94e878b7e67810ac8a57338aa7c91d8..b09bdef14632b937a351cea6b2968e889f2202a0 100644
--- a/substrate/frame/revive/proc-macro/src/lib.rs
+++ b/substrate/frame/revive/proc-macro/src/lib.rs
@@ -512,10 +512,9 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {
 				let result = (|| #body)();
 				if ::log::log_enabled!(target: "runtime::revive::strace", ::log::Level::Trace) {
 						use core::fmt::Write;
-						let mut w = sp_std::Writer::default();
-						let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result);
-						let msg = core::str::from_utf8(&w.inner()).unwrap_or_default();
-						self.ext().append_debug_buffer(msg);
+						let mut msg = alloc::string::String::default();
+						let _ = core::write!(&mut msg, #trace_fmt_str, #( #trace_fmt_args, )* result);
+						self.ext().append_debug_buffer(&msg);
 				}
 				result
 			}
diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json b/substrate/frame/revive/rpc/examples/js/abi/Errors.json
similarity index 100%
rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json
rename to substrate/frame/revive/rpc/examples/js/abi/Errors.json
diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts
similarity index 98%
rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts
rename to substrate/frame/revive/rpc/examples/js/abi/Errors.ts
index f3776e498fd56c092b496a33edd6dd6d0b41d2a1..b39567531c6d3d2cf979234d2d30fcf31b305852 100644
--- a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts
+++ b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts
@@ -1,4 +1,4 @@
-export const ErrorTesterAbi = [
+export const ErrorsAbi = [
   {
     inputs: [
       {
diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts
deleted file mode 100644
index f3776e498fd56c092b496a33edd6dd6d0b41d2a1..0000000000000000000000000000000000000000
--- a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts
+++ /dev/null
@@ -1,106 +0,0 @@
-export const ErrorTesterAbi = [
-  {
-    inputs: [
-      {
-        internalType: "string",
-        name: "message",
-        type: "string",
-      },
-    ],
-    name: "CustomError",
-    type: "error",
-  },
-  {
-    inputs: [
-      {
-        internalType: "bool",
-        name: "newState",
-        type: "bool",
-      },
-    ],
-    name: "setState",
-    outputs: [],
-    stateMutability: "nonpayable",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "state",
-    outputs: [
-      {
-        internalType: "bool",
-        name: "",
-        type: "bool",
-      },
-    ],
-    stateMutability: "view",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerAssertError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerCustomError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerDivisionByZero",
-    outputs: [
-      {
-        internalType: "uint256",
-        name: "",
-        type: "uint256",
-      },
-    ],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerOutOfBoundsError",
-    outputs: [
-      {
-        internalType: "uint256",
-        name: "",
-        type: "uint256",
-      },
-    ],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerRequireError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerRevertError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [
-      {
-        internalType: "uint256",
-        name: "value",
-        type: "uint256",
-      },
-    ],
-    name: "valueMatch",
-    outputs: [],
-    stateMutability: "payable",
-    type: "function",
-  },
-] as const;
diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb
index 46994bb147547bfb9b960b7630d1a6d274ee75dd..67df5841e43fba141c7a146a1e4a8958b4c7a84c 100755
Binary files a/substrate/frame/revive/rpc/examples/js/bun.lockb and b/substrate/frame/revive/rpc/examples/js/bun.lockb differ
diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
similarity index 98%
rename from substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol
rename to substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
index f1fdd219624ab9cd7d367922908e10f1a10cf213..abbdba8d32eb4a2eb3abda6f39589cca2b14f168 100644
--- a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol
+++ b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: MIT
 pragma solidity ^0.8.0;
 
-contract ErrorTester {
+contract Errors {
 	bool public state;
 
 	// Payable function that can be used to test insufficient funds errors
diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json
index 6d8d00fd4214721e3f42418c78e440aaa3309f4e..0119f4f34a177d6647158b45e830e58af9a85424 100644
--- a/substrate/frame/revive/rpc/examples/js/package.json
+++ b/substrate/frame/revive/rpc/examples/js/package.json
@@ -9,10 +9,10 @@
 		"preview": "vite preview"
 	},
 	"dependencies": {
+		"@parity/revive": "^0.0.5",
 		"ethers": "^6.13.4",
 		"solc": "^0.8.28",
-		"viem": "^2.21.47",
-		"@parity/revive": "^0.0.5"
+		"viem": "^2.21.47"
 	},
 	"devDependencies": {
 		"prettier": "^3.3.3",
diff --git a/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm
similarity index 100%
rename from substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm
rename to substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index 37ebbc9ea3b34d8f66b7e4cb51d9064f16193d51..871adeccbc9a450d273987f7f8b53576d79a1d42 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -1,7 +1,7 @@
 import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts'
 import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test'
 import { encodeFunctionData, Hex, parseEther } from 'viem'
-import { ErrorTesterAbi } from '../abi/ErrorTester'
+import { ErrorsAbi } from '../abi/Errors'
 import { FlipperCallerAbi } from '../abi/FlipperCaller'
 import { FlipperAbi } from '../abi/Flipper'
 
@@ -17,19 +17,19 @@ const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')])
 
 for (const env of envs) {
 	describe(env.serverWallet.chain.name, () => {
-		let errorTesterAddr: Hex = '0x'
+		let errorsAddr: Hex = '0x'
 		let flipperAddr: Hex = '0x'
 		let flipperCallerAddr: Hex = '0x'
 		beforeAll(async () => {
 			{
 				const hash = await env.serverWallet.deployContract({
-					abi: ErrorTesterAbi,
-					bytecode: getByteCode('errorTester', env.evm),
+					abi: ErrorsAbi,
+					bytecode: getByteCode('errors', env.evm),
 				})
 				const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash })
 				if (!deployReceipt.contractAddress)
 					throw new Error('Contract address should be set')
-				errorTesterAddr = deployReceipt.contractAddress
+				errorsAddr = deployReceipt.contractAddress
 			}
 
 			{
@@ -60,8 +60,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerAssertError',
 				})
 			} catch (err) {
@@ -78,8 +78,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerRevertError',
 				})
 			} catch (err) {
@@ -96,8 +96,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerDivisionByZero',
 				})
 			} catch (err) {
@@ -116,8 +116,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerOutOfBoundsError',
 				})
 			} catch (err) {
@@ -136,8 +136,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerCustomError',
 				})
 			} catch (err) {
@@ -154,8 +154,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.simulateContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -187,8 +187,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -205,8 +205,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -223,8 +223,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.serverWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('11'),
 					args: [parseEther('10')],
@@ -255,8 +255,8 @@ for (const env of envs) {
 				expect(balance).toBe(0n)
 
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'setState',
 					args: [true],
 				})
@@ -273,7 +273,7 @@ for (const env of envs) {
 			expect(balance).toBe(0n)
 
 			const data = encodeFunctionData({
-				abi: ErrorTesterAbi,
+				abi: ErrorsAbi,
 				functionName: 'setState',
 				args: [true],
 			})
@@ -284,29 +284,7 @@ for (const env of envs) {
 					{
 						data,
 						from: env.accountWallet.account.address,
-						to: errorTesterAddr,
-					},
-				],
-			})
-		})
-
-		test.only('eth_estimate (no gas specified) child_call', async () => {
-			let balance = await env.serverWallet.getBalance(env.accountWallet.account)
-			expect(balance).toBe(0n)
-
-			const data = encodeFunctionData({
-				abi: FlipperCallerAbi,
-				functionName: 'callFlip',
-			})
-
-			await env.accountWallet.request({
-				method: 'eth_estimateGas',
-				params: [
-					{
-						data,
-						from: env.accountWallet.account.address,
-						to: flipperCallerAddr,
-						gas: `0x${Number(1000000).toString(16)}`,
+						to: errorsAddr,
 					},
 				],
 			})
diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
index 0040b0c78dc47f66521aeef559d8d2797201ea70..8289ac8b76e333e9d1361d09f52d0e9b5a7325b1 100644
--- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
@@ -1,9 +1,9 @@
 import { assert, getByteCode, walletClient } from './lib.ts'
-import { abi } from '../abi/piggyBank.ts'
+import { PiggyBankAbi } from '../abi/piggyBank.ts'
 import { parseEther } from 'viem'
 
 const hash = await walletClient.deployContract({
-	abi,
+	abi: PiggyBankAbi,
 	bytecode: getByteCode('piggyBank'),
 })
 const deployReceipt = await walletClient.waitForTransactionReceipt({ hash })
@@ -16,7 +16,7 @@ assert(contractAddress, 'Contract address should be set')
 	const result = await walletClient.estimateContractGas({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
@@ -26,7 +26,7 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
@@ -36,9 +36,6 @@ assert(contractAddress, 'Contract address should be set')
 
 	const receipt = await walletClient.waitForTransactionReceipt({ hash })
 	console.log(`Deposit receipt: ${receipt.status}`)
-	if (process.env.STOP) {
-		process.exit(0)
-	}
 }
 
 // Withdraw 5 WST
@@ -46,7 +43,7 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'withdraw',
 		args: [parseEther('5')],
 	})
@@ -58,7 +55,7 @@ assert(contractAddress, 'Contract address should be set')
 	// Check remaining balance
 	const balance = await walletClient.readContract({
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'getDeposit',
 	})
 
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 64b1f2014dd06815fcea6a87bc96306eb00eda8b..402e8c2d22b21471929e9c61acd2cc968af614cf 100644
Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index 901c15e9756b65b6059602c59b2bc190d927d7b0..de97844eccbbf85efabfec11ddedc2df62af5209 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -17,7 +17,7 @@
 //! The client connects to the source substrate chain
 //! and is used by the rpc server to query and send transactions to the substrate chain.
 use crate::{
-	runtime::GAS_PRICE,
+	runtime::gas_from_fee,
 	subxt_client::{
 		revive::{calls::types::EthTransact, events::ContractEmitted},
 		runtime_types::pallet_revive::storage::ContractInfo,
@@ -771,7 +771,7 @@ impl Client {
 	pub async fn evm_block(&self, block: Arc<SubstrateBlock>) -> Result<Block, ClientError> {
 		let runtime_api = self.inner.api.runtime_api().at(block.hash());
 		let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?;
-		let gas_limit = U256::from(max_fee / GAS_PRICE as u128);
+		let gas_limit = gas_from_fee(max_fee);
 
 		let header = block.header();
 		let timestamp = extract_block_timestamp(&block).await.unwrap_or_default();
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index ccd8bb043e90ec77008eed81a87c83bd5baa35df..230f2f8b7ef963c742d7cdf2ce9b5c1f47506a1d 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -148,31 +148,12 @@ impl EthRpcServer for EthRpcServerImpl {
 
 	async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult<H256> {
 		let hash = H256(keccak_256(&transaction.0));
-
-		let tx = TransactionSigned::decode(&transaction.0).map_err(|err| {
-			log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}");
-			EthRpcError::from(err)
-		})?;
-
-		let eth_addr = tx.recover_eth_address().map_err(|err| {
-			log::debug!(target: LOG_TARGET, "Failed to recover eth address: {err:?}");
-			EthRpcError::InvalidSignature
-		})?;
-
-		let tx = GenericTransaction::from_signed(tx, Some(eth_addr));
-
-		// Dry run the transaction to get the weight limit and storage deposit limit
-		let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?;
-
-		let call = subxt_client::tx().revive().eth_transact(
-			transaction.0,
-			dry_run.gas_required.into(),
-			dry_run.storage_deposit,
-		);
+		let call = subxt_client::tx().revive().eth_transact(transaction.0);
 		self.client.submit(call).await.map_err(|err| {
 			log::debug!(target: LOG_TARGET, "submit call failed: {err:?}");
 			err
 		})?;
+
 		log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}");
 		Ok(hash)
 	}
diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs
index 43b600c33d786d275749f3bd98271b9b744dbb44..e64e16d45b2aec2a9c2a2e972684ab03b64df8d2 100644
--- a/substrate/frame/revive/rpc/src/tests.rs
+++ b/substrate/frame/revive/rpc/src/tests.rs
@@ -222,7 +222,7 @@ async fn deploy_and_call() -> anyhow::Result<()> {
 async fn revert_call() -> anyhow::Result<()> {
 	let _lock = SHARED_RESOURCES.write();
 	let client = SharedResources::client().await;
-	let (bytecode, contract) = get_contract("ErrorTester")?;
+	let (bytecode, contract) = get_contract("Errors")?;
 	let receipt = TransactionBuilder::default()
 		.input(bytecode)
 		.send_and_wait_for_receipt(&client)
diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs
index c3495fc0559d220a7ccaaeaac488a12963b98a79..c8c967fbe091bb2af5bfaacaefbe854d4f9d0043 100644
--- a/substrate/frame/revive/src/evm.rs
+++ b/substrate/frame/revive/src/evm.rs
@@ -19,4 +19,6 @@
 
 mod api;
 pub use api::*;
+mod gas_encoder;
+pub use gas_encoder::*;
 pub mod runtime;
diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs
index df4ed1740ecdb75a8a027fc17fdc6b6f222b0ae9..c2d64f8e5e424b2086e0247e5ccea0e08d4350b0 100644
--- a/substrate/frame/revive/src/evm/api/byte.rs
+++ b/substrate/frame/revive/src/evm/api/byte.rs
@@ -116,7 +116,10 @@ macro_rules! impl_hex {
 
         impl Debug for $type {
             fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
-                write!(f, concat!(stringify!($type), "({})"), self.0.to_hex())
+                let hex_str = self.0.to_hex();
+                let truncated = &hex_str[..hex_str.len().min(100)];
+                let ellipsis = if hex_str.len() > 100 { "..." } else { "" };
+                write!(f, concat!(stringify!($type), "({}{})"), truncated, ellipsis)
             }
         }
 
diff --git a/substrate/frame/revive/src/evm/gas_encoder.rs b/substrate/frame/revive/src/evm/gas_encoder.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ffdf8b13c0439140a9e2b41d9830bf8c581f3421
--- /dev/null
+++ b/substrate/frame/revive/src/evm/gas_encoder.rs
@@ -0,0 +1,174 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Encodes/Decodes EVM gas values.
+
+use crate::Weight;
+use core::ops::{Div, Rem};
+use frame_support::pallet_prelude::CheckedShl;
+use sp_arithmetic::traits::{One, Zero};
+use sp_core::U256;
+
+// We use 3 digits to store each component.
+const SCALE: u128 = 100;
+
+/// Divides `value` by `mask`, rounding the result up to the nearest integer (ceiling division).
+///
+/// # Panics
+/// Panics if the `mask` is zero.
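+///
+/// For example, `round_up(5u32, 2)` is `3`, while `round_up(4u32, 2)` is `2`.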
+fn round_up<T>(value: T, mask: T) -> T
+where
+	T: One + Zero + Copy + Rem<Output = T> + Div<Output = T>,
+	<T as Rem>::Output: PartialEq,
+{
+	let rest = if value % mask == T::zero() { T::zero() } else { T::one() };
+	value / mask + rest
+}
+
+/// Rounds up the log2 of the given value to the nearest integer.
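+///
+/// For example, `log2_round_up(1000u128)` is `10`, since `2^9 < 1000 <= 2^10`.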
+fn log2_round_up<T>(val: T) -> u128
+where
+	T: Into<u128>,
+{
+	let val = val.into();
+	val.checked_ilog2()
+		.map(|v| if 1u128 << v == val { v } else { v + 1 })
+		.unwrap_or(0) as u128
+}
+
+mod private {
+	pub trait Sealed {}
+	impl Sealed for () {}
+}
+
+/// Encodes/Decodes EVM gas values.
+///
+/// # Note
+///
+/// This is defined as a trait rather than standalone functions to allow
+/// it to be added as an associated type to [`crate::Config`]. This way,
+/// it can be invoked without requiring the implementation bounds to be
+/// explicitly specified.
+///
+/// This trait is sealed and cannot be implemented by downstream crates.
+pub trait GasEncoder<Balance>: private::Sealed {
+	/// Encodes all components (deposit limit, weight reference time, and proof size) into a single
+	/// gas value.
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256;
+
+	/// Decodes the weight and deposit from the encoded gas value.
+	/// Returns `None` if the gas value is invalid.
+	fn decode(gas: U256) -> Option<(Weight, Balance)>;
+}
+
+impl<Balance> GasEncoder<Balance> for ()
+where
+	Balance: Zero + One + CheckedShl + Into<u128>,
+{
+	/// The encoding follows the pattern `g...grrppdd`, where:
+	/// - `dd`: log2 Deposit value, encoded in the lowest 2 digits.
+	/// - `pp`: log2 Proof size, encoded in the next 2 digits.
+	/// - `rr`: log2 Reference time, encoded in the next 2 digits.
+	/// - `g...g`: Gas limit, encoded in the highest digits.
+	///
+	/// # Note
+	/// - The deposit value is capped at 2^99.
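+	///
+	/// # Example
+	///
+	/// Encoding a gas limit of `5_000_000` with a weight of `(ref_time = 1_000_000, proof_size =
+	/// 10_000)` and a deposit of `1_000` (illustrative values) gives the components `rr = 20`,
+	/// `pp = 14` and `dd = 10`, so the encoded gas is `5_201_410`; decoding it yields the
+	/// rounded-up weight `(2^20, 2^14)` and deposit `2^10`.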
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256 {
+		let deposit: u128 = deposit.into();
+		let deposit_component = log2_round_up(deposit);
+
+		let proof_size = weight.proof_size();
+		let proof_size_component = SCALE * log2_round_up(proof_size);
+
+		let ref_time = weight.ref_time();
+		let ref_time_component = SCALE.pow(2) * log2_round_up(ref_time);
+
+		let components = U256::from(deposit_component + proof_size_component + ref_time_component);
+
+		let raw_gas_mask = U256::from(SCALE).pow(3.into());
+		let raw_gas_component = if gas_limit < raw_gas_mask.saturating_add(components) {
+			raw_gas_mask
+		} else {
+			round_up(gas_limit, raw_gas_mask).saturating_mul(raw_gas_mask)
+		};
+
+		components.saturating_add(raw_gas_component)
+	}
+
+	fn decode(gas: U256) -> Option<(Weight, Balance)> {
+		let deposit = gas % SCALE;
+
+		// Casting with as_u32 is safe since all values are strictly less than `SCALE`.
+		let deposit = deposit.as_u32();
+		let proof_time = ((gas / SCALE) % SCALE).as_u32();
+		let ref_time = ((gas / SCALE.pow(2)) % SCALE).as_u32();
+
+		let weight = Weight::from_parts(
+			if ref_time == 0 { 0 } else { 1u64.checked_shl(ref_time)? },
+			if proof_time == 0 { 0 } else { 1u64.checked_shl(proof_time)? },
+		);
+		let deposit =
+			if deposit == 0 { Balance::zero() } else { Balance::one().checked_shl(deposit)? };
+
+		Some((weight, deposit))
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn test_gas_encoding_decoding_works() {
+		let raw_gas_limit = 111_111_999_999_999u128;
+		let weight = Weight::from_parts(222_999_999, 333_999_999);
+		let deposit = 444_999_999u64;
+
+		let encoded_gas = <() as GasEncoder<u64>>::encode(raw_gas_limit.into(), weight, deposit);
+		assert_eq!(encoded_gas, U256::from(111_112_000_282_929u128));
+		assert!(encoded_gas > raw_gas_limit.into());
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert!(decoded_weight.all_gte(weight));
+		assert!(weight.mul(2).all_gte(decoded_weight));
+
+		assert!(decoded_deposit >= deposit);
+		assert!(deposit * 2 >= decoded_deposit);
+	}
+
+	#[test]
+	fn test_encoding_zero_values_work() {
+		let encoded_gas = <() as GasEncoder<u64>>::encode(
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+
+		assert_eq!(encoded_gas, U256::from(1_00_00_00));
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert_eq!(Weight::default(), decoded_weight);
+		assert_eq!(0u64, decoded_deposit);
+	}
+
+	#[test]
+	fn test_overflow() {
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00u128.into()), "Invalid proof size");
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00_00u128.into()), "Invalid ref_time");
+	}
+}
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index 24b75de835698697311d191a812df36f6fd81390..d4b344e20eb850e6c42d84bdf870204c1382cb67 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -16,9 +16,13 @@
 // limitations under the License.
 //! Runtime types for integrating `pallet-revive` with the EVM.
 use crate::{
-	evm::api::{GenericTransaction, TransactionSigned},
-	AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET,
+	evm::{
+		api::{GenericTransaction, TransactionSigned},
+		GasEncoder,
+	},
+	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, LOG_TARGET,
 };
+use alloc::vec::Vec;
 use codec::{Decode, Encode};
 use frame_support::{
 	dispatch::{DispatchInfo, GetDispatchInfo},
@@ -26,20 +30,17 @@ use frame_support::{
 };
 use pallet_transaction_payment::OnChargeTransaction;
 use scale_info::{StaticTypeInfo, TypeInfo};
-use sp_arithmetic::Percent;
 use sp_core::{Get, H256, U256};
 use sp_runtime::{
 	generic::{self, CheckedExtrinsic, ExtrinsicFormat},
 	traits::{
-		self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member,
-		TransactionExtension,
+		self, AtLeast32BitUnsigned, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata,
+		IdentifyAccount, Member, TransactionExtension,
 	},
 	transaction_validity::{InvalidTransaction, TransactionValidityError},
 	OpaqueExtrinsic, RuntimeDebug, Saturating,
 };
 
-use alloc::vec::Vec;
-
 type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 
 /// The EVM gas price.
@@ -48,7 +49,28 @@ type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 /// We use a fixed value for the gas price.
 /// This let us calculate the gas estimate for a transaction with the formula:
 /// `estimate_gas = substrate_fee / gas_price`.
-pub const GAS_PRICE: u32 = 1u32;
+///
+/// The chosen constant value is:
+/// - Not too high, ensuring the gas value is large enough (at least 7 digits) to encode the
+///   ref_time, proof_size, and deposit into the six least significant (lower) digits of the gas value.
+/// - Not too low, enabling users to adjust the gas price to define a tip.
+pub const GAS_PRICE: u32 = 1_000u32;
+
+/// Convert a `Balance` into a gas value, using the fixed `GAS_PRICE`.
+/// The gas is calculated as `fee / GAS_PRICE`, rounded up to the nearest integer.
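+///
+/// For example, with `GAS_PRICE = 1_000`, a fee of `1_500` yields a gas value of `2`
+/// (rounded up), while a fee of `2_000` yields exactly `2`.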
+pub fn gas_from_fee<Balance>(fee: Balance) -> U256
+where
+	u32: Into<Balance>,
+	Balance: Into<U256> + AtLeast32BitUnsigned + Copy,
+{
+	let gas_price = GAS_PRICE.into();
+	let remainder = fee % gas_price;
+	if remainder.is_zero() {
+		(fee / gas_price).into()
+	} else {
+		(fee.saturating_add(gas_price) / gas_price).into()
+	}
+}
 
 /// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned
 /// [`crate::Call::eth_transact`] extrinsic.
@@ -140,15 +162,8 @@ where
 	fn check(self, lookup: &Lookup) -> Result<Self::Checked, TransactionValidityError> {
 		if !self.0.is_signed() {
 			if let Ok(call) = self.0.function.clone().try_into() {
-				if let crate::Call::eth_transact { payload, gas_limit, storage_deposit_limit } =
-					call
-				{
-					let checked = E::try_into_checked_extrinsic(
-						payload,
-						gas_limit,
-						storage_deposit_limit,
-						self.encoded_size(),
-					)?;
+				if let crate::Call::eth_transact { payload } = call {
+					let checked = E::try_into_checked_extrinsic(payload, self.encoded_size())?;
 					return Ok(checked)
 				};
 			}
@@ -251,7 +266,7 @@ where
 /// EthExtra convert an unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`].
 pub trait EthExtra {
 	/// The Runtime configuration.
-	type Config: crate::Config + pallet_transaction_payment::Config;
+	type Config: Config + pallet_transaction_payment::Config;
 
 	/// The Runtime's transaction extension.
 	/// It should include at least:
@@ -281,8 +296,6 @@ pub trait EthExtra {
 	/// - `encoded_len`: The encoded length of the extrinsic.
 	fn try_into_checked_extrinsic(
 		payload: Vec<u8>,
-		gas_limit: Weight,
-		storage_deposit_limit: BalanceOf<Self::Config>,
 		encoded_len: usize,
 	) -> Result<
 		CheckedExtrinsic<AccountIdOf<Self::Config>, CallOf<Self::Config>, Self::Extension>,
@@ -307,12 +320,16 @@ pub trait EthExtra {
 			InvalidTransaction::BadProof
 		})?;
 
-		let signer =
-			<Self::Config as crate::Config>::AddressMapper::to_fallback_account_id(&signer);
+		let signer = <Self::Config as Config>::AddressMapper::to_fallback_account_id(&signer);
 		let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } =
 			GenericTransaction::from_signed(tx, None);
 
-		if chain_id.unwrap_or_default() != <Self::Config as crate::Config>::ChainId::get().into() {
+		let Some(gas) = gas else {
+			log::debug!(target: LOG_TARGET, "No gas provided");
+			return Err(InvalidTransaction::Call);
+		};
+
+		if chain_id.unwrap_or_default() != <Self::Config as Config>::ChainId::get().into() {
 			log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}");
 			return Err(InvalidTransaction::Call);
 		}
@@ -324,6 +341,13 @@ pub trait EthExtra {
 			})?;
 
 		let data = input.unwrap_or_default().0;
+
+		let (gas_limit, storage_deposit_limit) =
+			<Self::Config as Config>::EthGasEncoder::decode(gas).ok_or_else(|| {
+				log::debug!(target: LOG_TARGET, "Failed to decode gas: {gas:?}");
+				InvalidTransaction::Call
+			})?;
+
 		let call = if let Some(dest) = to {
 			crate::Call::call::<Self::Config> {
 				dest,
@@ -359,13 +383,13 @@ pub trait EthExtra {
 		// Fees calculated with the fixed `GAS_PRICE`
 		// When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE`
 		let eth_fee_no_tip = U256::from(GAS_PRICE)
-			.saturating_mul(gas.unwrap_or_default())
+			.saturating_mul(gas)
 			.try_into()
 			.map_err(|_| InvalidTransaction::Call)?;
 
 		// Fees with the actual gas_price from the transaction.
 		let eth_fee: BalanceOf<Self::Config> = U256::from(gas_price.unwrap_or_default())
-			.saturating_mul(gas.unwrap_or_default())
+			.saturating_mul(gas)
 			.try_into()
 			.map_err(|_| InvalidTransaction::Call)?;
 
@@ -380,27 +404,17 @@ pub trait EthExtra {
 				Default::default(),
 			)
 			.into();
-		log::trace!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}");
+		log::debug!(target: LOG_TARGET, "try_into_checked_extrinsic: gas_price: {gas_price:?}, encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}");
 
 		// The fees from the Ethereum transaction should be greater or equal to the actual fees paid
 		// by the account.
 		if eth_fee < actual_fee {
-			log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}");
+			log::debug!(target: LOG_TARGET, "eth fees {eth_fee:?} too low, actual fees: {actual_fee:?}");
 			return Err(InvalidTransaction::Payment.into())
 		}
 
-		let min = actual_fee.min(eth_fee_no_tip);
-		let max = actual_fee.max(eth_fee_no_tip);
-		let diff = Percent::from_rational(max - min, min);
-		if diff > Percent::from_percent(10) {
-			log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?} should be no more than 10% got {diff:?}");
-			return Err(InvalidTransaction::Call.into())
-		} else {
-			log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?}:  {diff:?}");
-		}
-
 		let tip = eth_fee.saturating_sub(eth_fee_no_tip);
-		log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce {nonce:?} and tip: {tip:?}");
+		log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce: {nonce:?} and tip: {tip:?}");
 		Ok(CheckedExtrinsic {
 			format: ExtrinsicFormat::Signed(signer.into(), Self::get_eth_extension(nonce, tip)),
 			function,
@@ -415,6 +429,7 @@ mod test {
 		evm::*,
 		test_utils::*,
 		tests::{ExtBuilder, RuntimeCall, RuntimeOrigin, Test},
+		Weight,
 	};
 	use frame_support::{error::LookupError, traits::fungible::Mutate};
 	use pallet_revive_fixtures::compile_module;
@@ -456,8 +471,6 @@ mod test {
 	#[derive(Clone)]
 	struct UncheckedExtrinsicBuilder {
 		tx: GenericTransaction,
-		gas_limit: Weight,
-		storage_deposit_limit: BalanceOf<Test>,
 		before_validate: Option<std::sync::Arc<dyn Fn() + Send + Sync>>,
 	}
 
@@ -467,12 +480,10 @@ mod test {
 			Self {
 				tx: GenericTransaction {
 					from: Some(Account::default().address()),
-					chain_id: Some(<Test as crate::Config>::ChainId::get().into()),
+					chain_id: Some(<Test as Config>::ChainId::get().into()),
 					gas_price: Some(U256::from(GAS_PRICE)),
 					..Default::default()
 				},
-				gas_limit: Weight::zero(),
-				storage_deposit_limit: 0,
 				before_validate: None,
 			}
 		}
@@ -500,7 +511,6 @@ mod test {
 		fn call_with(dest: H160) -> Self {
 			let mut builder = Self::new();
 			builder.tx.to = Some(dest);
-			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
@@ -508,45 +518,42 @@ mod test {
 		fn instantiate_with(code: Vec<u8>, data: Vec<u8>) -> Self {
 			let mut builder = Self::new();
 			builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect()));
-			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
-		/// Update the transaction with the given function.
-		fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self {
-			f(&mut self.tx);
-			self
-		}
 		/// Set before_validate function.
 		fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self {
 			self.before_validate = Some(std::sync::Arc::new(f));
 			self
 		}
 
+		fn check(
+			self,
+		) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> {
+			self.mutate_estimate_and_check(Box::new(|_| ()))
+		}
+
 		/// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension.
-		fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> {
+		fn mutate_estimate_and_check(
+			mut self,
+			f: Box<dyn FnOnce(&mut GenericTransaction) -> ()>,
+		) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> {
+			ExtBuilder::default().build().execute_with(|| self.estimate_gas());
+			f(&mut self.tx);
 			ExtBuilder::default().build().execute_with(|| {
-				let UncheckedExtrinsicBuilder {
-					tx,
-					gas_limit,
-					storage_deposit_limit,
-					before_validate,
-				} = self.clone();
+				let UncheckedExtrinsicBuilder { tx, before_validate, .. } = self.clone();
 
 				// Fund the account.
 				let account = Account::default();
-				let _ = <Test as crate::Config>::Currency::set_balance(
+				let _ = <Test as Config>::Currency::set_balance(
 					&account.substrate_account(),
 					100_000_000_000_000,
 				);
 
-				let payload =
-					account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload();
-				let call = RuntimeCall::Contracts(crate::Call::eth_transact {
-					payload,
-					gas_limit,
-					storage_deposit_limit,
-				});
+				let payload = account
+					.sign_transaction(tx.clone().try_into_unsigned().unwrap())
+					.signed_payload();
+				let call = RuntimeCall::Contracts(crate::Call::eth_transact { payload });
 
 				let encoded_len = call.encoded_size();
 				let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into();
@@ -565,7 +572,7 @@ mod test {
 					0,
 				)?;
 
-				Ok((result.function, extra))
+				Ok((result.function, extra, tx))
 			})
 		}
 	}
@@ -573,14 +580,18 @@ mod test {
 	#[test]
 	fn check_eth_transact_call_works() {
 		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
+		let (call, _, tx) = builder.check().unwrap();
+		let (gas_limit, storage_deposit_limit) =
+			<<Test as Config>::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap();
+
 		assert_eq!(
-			builder.check().unwrap().0,
+			call,
 			crate::Call::call::<Test> {
-				dest: builder.tx.to.unwrap(),
-				value: builder.tx.value.unwrap_or_default().as_u64(),
-				gas_limit: builder.gas_limit,
-				storage_deposit_limit: builder.storage_deposit_limit,
-				data: builder.tx.input.unwrap_or_default().0
+				dest: tx.to.unwrap(),
+				value: tx.value.unwrap_or_default().as_u64(),
+				data: tx.input.unwrap_or_default().0,
+				gas_limit,
+				storage_deposit_limit
 			}
 			.into()
 		);
@@ -591,16 +602,19 @@ mod test {
 		let (code, _) = compile_module("dummy").unwrap();
 		let data = vec![];
 		let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone());
+		let (call, _, tx) = builder.check().unwrap();
+		let (gas_limit, storage_deposit_limit) =
+			<<Test as Config>::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap();
 
 		assert_eq!(
-			builder.check().unwrap().0,
+			call,
 			crate::Call::instantiate_with_code::<Test> {
-				value: builder.tx.value.unwrap_or_default().as_u64(),
-				gas_limit: builder.gas_limit,
-				storage_deposit_limit: builder.storage_deposit_limit,
+				value: tx.value.unwrap_or_default().as_u64(),
 				code,
 				data,
-				salt: None
+				salt: None,
+				gas_limit,
+				storage_deposit_limit
 			}
 			.into()
 		);
@@ -608,11 +622,10 @@ mod test {
 
 	#[test]
 	fn check_eth_transact_nonce_works() {
-		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
-			.update(|tx| tx.nonce = Some(1u32.into()));
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
 
 		assert_eq!(
-			builder.check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.nonce = Some(1u32.into()))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Future))
 		);
 
@@ -629,11 +642,10 @@ mod test {
 
 	#[test]
 	fn check_eth_transact_chain_id_works() {
-		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
-			.update(|tx| tx.chain_id = Some(42.into()));
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
 
 		assert_eq!(
-			builder.check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.chain_id = Some(42.into()))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
 		);
 	}
@@ -646,14 +658,14 @@ mod test {
 
 		// Fail because the tx input fail to get the blob length
 		assert_eq!(
-			builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.input = Some(Bytes(vec![1, 2, 3])))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
 		);
 	}
 
 	#[test]
 	fn check_transaction_fees() {
-		let scenarios: [(_, Box<dyn FnOnce(&mut GenericTransaction)>, _); 5] = [
+		let scenarios: Vec<(_, Box<dyn FnOnce(&mut GenericTransaction)>, _)> = vec![
 			(
 				"Eth fees too low",
 				Box::new(|tx| {
@@ -661,42 +673,20 @@ mod test {
 				}),
 				InvalidTransaction::Payment,
 			),
-			(
-				"Gas fees too high",
-				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 2);
-				}),
-				InvalidTransaction::Call,
-			),
 			(
 				"Gas fees too low",
 				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 2);
-				}),
-				InvalidTransaction::Call,
-			),
-			(
-				"Diff > 10%",
-				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 111 / 100);
+					tx.gas = Some(tx.gas.unwrap() / 2);
 				}),
-				InvalidTransaction::Call,
-			),
-			(
-				"Diff < 10%",
-				Box::new(|tx| {
-					tx.gas_price = Some(tx.gas_price.unwrap() * 2);
-					tx.gas = Some(tx.gas.unwrap() * 89 / 100);
-				}),
-				InvalidTransaction::Call,
+				InvalidTransaction::Payment,
 			),
 		];
 
 		for (msg, update_tx, err) in scenarios {
-			let builder =
-				UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx);
+			let res = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
+				.mutate_estimate_and_check(update_tx);
 
-			assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg);
+			assert_eq!(res, Err(TransactionValidityError::Invalid(err)), "{}", msg);
 		}
 	}
 
@@ -704,16 +694,16 @@ mod test {
 	fn check_transaction_tip() {
 		let (code, _) = compile_module("dummy").unwrap();
 		let data = vec![];
-		let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone())
-			.update(|tx| {
-				tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100);
-				log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price);
-			});
+		let (_, extra, tx) =
+			UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone())
+				.mutate_estimate_and_check(Box::new(|tx| {
+					tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100);
+					log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price);
+				}))
+				.unwrap();
 
-		let tx = &builder.tx;
 		let expected_tip =
 			tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap();
-		let (_, extra) = builder.check().unwrap();
 		assert_eq!(U256::from(extra.1.tip()), expected_tip);
 	}
 }
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index b9a39e7ce4d370dfe33d88a5cef52b58d9dcdb80..04bce264a188c1b55f1a9c53398903a9a9e55545 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -41,7 +41,10 @@ pub mod test_utils;
 pub mod weights;
 
 use crate::{
-	evm::{runtime::GAS_PRICE, GenericTransaction},
+	evm::{
+		runtime::{gas_from_fee, GAS_PRICE},
+		GasEncoder, GenericTransaction,
+	},
 	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack},
 	gas::GasMeter,
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
@@ -295,6 +298,11 @@ pub mod pallet {
 		/// The ratio between the decimal representation of the native token and the ETH token.
 		#[pallet::constant]
 		type NativeToEthRatio: Get<u32>;
+
+		/// Encode and decode Ethereum gas values.
+		/// The only valid value is `()`. See [`GasEncoder`].
+		#[pallet::no_default_bounds]
+		type EthGasEncoder: GasEncoder<BalanceOf<Self>>;
 	}
 
 	/// Container for different types that implement [`DefaultConfig`]` of this pallet.
@@ -368,6 +376,7 @@ pub mod pallet {
 			type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>;
 			type ChainId = ConstU64<0>;
 			type NativeToEthRatio = ConstU32<1>;
+			type EthGasEncoder = ();
 		}
 	}
 
@@ -560,6 +569,8 @@ pub mod pallet {
 		AccountUnmapped,
 		/// Tried to map an account that is already mapped.
 		AccountAlreadyMapped,
+		/// The transaction used to dry-run a contract is invalid.
+		InvalidGenericTransaction,
 	}
 
 	/// A reason for the pallet contracts placing a hold on funds.
@@ -761,12 +772,7 @@ pub mod pallet {
 		#[allow(unused_variables)]
 		#[pallet::call_index(0)]
 		#[pallet::weight(Weight::MAX)]
-		pub fn eth_transact(
-			origin: OriginFor<T>,
-			payload: Vec<u8>,
-			gas_limit: Weight,
-			#[pallet::compact] storage_deposit_limit: BalanceOf<T>,
-		) -> DispatchResultWithPostInfo {
+		pub fn eth_transact(origin: OriginFor<T>, payload: Vec<u8>) -> DispatchResultWithPostInfo {
 			Err(frame_system::Error::CallFiltered::<T>.into())
 		}
 
@@ -1406,11 +1412,8 @@ where
 				return Err(EthTransactError::Message("Invalid transaction".into()));
 			};
 
-			let eth_dispatch_call = crate::Call::<T>::eth_transact {
-				payload: unsigned_tx.dummy_signed_payload(),
-				gas_limit: result.gas_required,
-				storage_deposit_limit: result.storage_deposit,
-			};
+			let eth_dispatch_call =
+				crate::Call::<T>::eth_transact { payload: unsigned_tx.dummy_signed_payload() };
 			let encoded_len = utx_encoded_size(eth_dispatch_call);
 			let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(
 				encoded_len,
@@ -1418,7 +1421,9 @@ where
 				0u32.into(),
 			)
 			.into();
-			let eth_gas: U256 = (fee / GAS_PRICE.into()).into();
+			let eth_gas = gas_from_fee(fee);
+			let eth_gas =
+				T::EthGasEncoder::encode(eth_gas, result.gas_required, result.storage_deposit);
 
 			if eth_gas == result.eth_gas {
 				log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}");
diff --git a/substrate/frame/revive/src/transient_storage.rs b/substrate/frame/revive/src/transient_storage.rs
index 298e0296fe69e383567c951408fbf68155fc5810..d88adc4373590847a28a9ef18aaf4eba91fc38b3 100644
--- a/substrate/frame/revive/src/transient_storage.rs
+++ b/substrate/frame/revive/src/transient_storage.rs
@@ -22,11 +22,11 @@ use crate::{
 	storage::WriteOutcome,
 	Config, Error,
 };
+use alloc::{collections::BTreeMap, vec::Vec};
 use codec::Encode;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, mem};
 use frame_support::DefaultNoBound;
 use sp_runtime::{DispatchError, DispatchResult, Saturating};
-use sp_std::{collections::btree_map::BTreeMap, mem, vec::Vec};
 
 /// Meter entry tracks transaction allocations.
 #[derive(Default, Debug)]
diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml
index dedde9956b6f4c9027692809bb23a87ceefe4d5a..c539f1dc4dc10a821c4349cf539f95340a330e1e 100644
--- a/substrate/frame/root-offences/Cargo.toml
+++ b/substrate/frame/root-offences/Cargo.toml
@@ -34,7 +34,6 @@ pallet-timestamp = { workspace = true, default-features = true }
 
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 
diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml
index b3ed95bf1de5bc42bcf3bb22b6d747240e337cac..626993a0547b58bee5a125edcec1ffce9ed5b779 100644
--- a/substrate/frame/salary/Cargo.toml
+++ b/substrate/frame/salary/Cargo.toml
@@ -17,43 +17,25 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 pallet-ranked-collective = { optional = true, workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-arithmetic = { workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/experimental",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"pallet-ranked-collective/std",
 	"scale-info/std",
-	"sp-arithmetic/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
+	"frame/runtime-benchmarks",
 	"pallet-ranked-collective/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
+	"frame/try-runtime",
 	"pallet-ranked-collective?/try-runtime",
-	"sp-runtime/try-runtime",
 ]
diff --git a/substrate/frame/salary/src/benchmarking.rs b/substrate/frame/salary/src/benchmarking.rs
index aeae8d2d67f88ba8509f0aed7031b7983233a1d3..6dfd6f6dd488933eaf65f93820c9ed1f2f2f21aa 100644
--- a/substrate/frame/salary/src/benchmarking.rs
+++ b/substrate/frame/salary/src/benchmarking.rs
@@ -22,10 +22,7 @@
 use super::*;
 use crate::Pallet as Salary;
 
-use frame_benchmarking::v2::*;
-use frame_system::{Pallet as System, RawOrigin};
-use sp_core::Get;
-
+use frame::benchmarking::prelude::*;
 const SEED: u32 = 0;
 
 fn ensure_member_with_salary<T: Config<I>, I: 'static>(who: &T::AccountId) {
@@ -37,7 +34,7 @@ fn ensure_member_with_salary<T: Config<I>, I: 'static>(who: &T::AccountId) {
 	for _ in 0..255 {
 		let r = T::Members::rank_of(who).expect("prior guard ensures `who` is a member; qed");
 		if !T::Salary::get_salary(r, &who).is_zero() {
-			break
+			break;
 		}
 		T::Members::promote(who).unwrap();
 	}
diff --git a/substrate/frame/salary/src/lib.rs b/substrate/frame/salary/src/lib.rs
index efb4f5d3c5422d96b2b2038122de8c2d391cc855..6a843625f4a7bc61901b890f79135c183188d354 100644
--- a/substrate/frame/salary/src/lib.rs
+++ b/substrate/frame/salary/src/lib.rs
@@ -19,20 +19,10 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use codec::{Decode, Encode, MaxEncodedLen};
 use core::marker::PhantomData;
-use scale_info::TypeInfo;
-use sp_arithmetic::traits::{Saturating, Zero};
-use sp_runtime::{Perbill, RuntimeDebug};
-
-use frame_support::{
-	defensive,
-	dispatch::DispatchResultWithPostInfo,
-	ensure,
-	traits::{
-		tokens::{GetSalary, Pay, PaymentStatus},
-		RankedMembers, RankedMembersSwapHandler,
-	},
+use frame::{
+	prelude::*,
+	traits::tokens::{GetSalary, Pay, PaymentStatus},
 };
 
 #[cfg(test)]
@@ -85,12 +75,9 @@ pub struct ClaimantStatus<CycleIndex, Balance, Id> {
 	status: ClaimState<Balance, Id>,
 }
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::{dispatch::Pays, pallet_prelude::*};
-	use frame_system::pallet_prelude::*;
-
 	#[pallet::pallet]
 	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
 
@@ -460,15 +447,15 @@ impl<T: Config<I>, I: 'static>
 	) {
 		if who == new_who {
 			defensive!("Should not try to swap with self");
-			return
+			return;
 		}
 		if Claimant::<T, I>::contains_key(new_who) {
 			defensive!("Should not try to overwrite existing claimant");
-			return
+			return;
 		}
 
 		let Some(claimant) = Claimant::<T, I>::take(who) else {
-			frame_support::defensive!("Claimant should exist when swapping");
+			defensive!("Claimant should exist when swapping");
 			return;
 		};
 
diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs
index 0c1fb8bbdcba08f9c171b4c0f78f34a5b091d5f4..e4e9c8f6a31b5e8adfd15d7ab6b984193eef3939 100644
--- a/substrate/frame/salary/src/tests/integration.rs
+++ b/substrate/frame/salary/src/tests/integration.rs
@@ -19,25 +19,14 @@
 
 use crate as pallet_salary;
 use crate::*;
-use frame_support::{
-	assert_noop, assert_ok, derive_impl, hypothetically,
-	pallet_prelude::Weight,
-	parameter_types,
-	traits::{ConstU64, EitherOf, MapSuccess, NoOpPoll},
-};
+use frame::{deps::sp_io, testing_prelude::*};
 use pallet_ranked_collective::{EnsureRanked, Geometric};
-use sp_core::{ConstU16, Get};
-use sp_runtime::{
-	traits::{Convert, ReduceBy, ReplaceWithDefault},
-	BuildStorage,
-};
 
 type Rank = u16;
 type Block = frame_system::mocking::MockBlock<Test>;
 
-frame_support::construct_runtime!(
-	pub enum Test
-	{
+construct_runtime!(
+	pub struct Test {
 		System: frame_system,
 		Salary: pallet_salary,
 		Club: pallet_ranked_collective,
@@ -145,9 +134,9 @@ impl pallet_ranked_collective::Config for Test {
 	type BenchmarkSetup = Salary;
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let mut ext = sp_io::TestExternalities::new(t);
+	let mut ext = TestState::new(t);
 	ext.execute_with(|| System::set_block_number(1));
 	ext
 }
@@ -194,7 +183,7 @@ fn swap_exhaustive_works() {
 
 			// The events mess up the storage root:
 			System::reset_events();
-			sp_io::storage::root(sp_runtime::StateVersion::V1)
+			sp_io::storage::root(StateVersion::V1)
 		});
 
 		let root_swap = hypothetically!({
@@ -207,7 +196,7 @@ fn swap_exhaustive_works() {
 
 			// The events mess up the storage root:
 			System::reset_events();
-			sp_io::storage::root(sp_runtime::StateVersion::V1)
+			sp_io::storage::root(StateVersion::V1)
 		});
 
 		assert_eq!(root_add, root_swap);
diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs
index db1c8b947ef57c7c1919568f5bf0545f0449994d..3bb7bc4adf1ea5c04626cc774f416e1494950dae 100644
--- a/substrate/frame/salary/src/tests/unit.rs
+++ b/substrate/frame/salary/src/tests/unit.rs
@@ -17,23 +17,15 @@
 
 //! The crate's tests.
 
-use std::collections::BTreeMap;
-
-use core::cell::RefCell;
-use frame_support::{
-	assert_noop, assert_ok, derive_impl,
-	pallet_prelude::Weight,
-	parameter_types,
-	traits::{tokens::ConvertRank, ConstU64},
-};
-use sp_runtime::{traits::Identity, BuildStorage, DispatchResult};
-
 use crate as pallet_salary;
 use crate::*;
+use core::cell::RefCell;
+use frame::{deps::sp_runtime::traits::Identity, testing_prelude::*, traits::tokens::ConvertRank};
+use std::collections::BTreeMap;
 
-type Block = frame_system::mocking::MockBlock<Test>;
+type Block = MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -124,7 +116,7 @@ impl RankedMembers for TestClub {
 	}
 	fn demote(who: &Self::AccountId) -> DispatchResult {
 		CLUB.with(|club| match club.borrow().get(who) {
-			None => Err(sp_runtime::DispatchError::Unavailable),
+			None => Err(DispatchError::Unavailable),
 			Some(&0) => {
 				club.borrow_mut().remove(&who);
 				Ok(())
@@ -156,9 +148,9 @@ impl Config for Test {
 	type Budget = Budget;
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let mut ext = sp_io::TestExternalities::new(t);
+	let mut ext = TestState::new(t);
 	ext.execute_with(|| System::set_block_number(1));
 	ext
 }
diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs
index f1cdaaa225a44e7c7854780cca93ad04f4a59ca8..43c001b30d336d54fef9a88b07ff182bfa5e9cd9 100644
--- a/substrate/frame/salary/src/weights.rs
+++ b/substrate/frame/salary/src/weights.rs
@@ -46,8 +46,8 @@
 #![allow(unused_imports)]
 #![allow(missing_docs)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
+use frame::weights_prelude::*;
 
 /// Weight functions needed for `pallet_salary`.
 pub trait WeightInfo {
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 8031ddf96e6a1711cc38f167b55bc633f2d3ed8d..f79a52bc6c5bf8343a6a4a6d022be5090f71873c 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -203,12 +203,22 @@ pub mod prelude {
 	/// Dispatch types from `frame-support`, other fundamental traits
 	#[doc(no_inline)]
 	pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo};
-	pub use frame_support::traits::{Contains, IsSubType, OnRuntimeUpgrade};
+	pub use frame_support::{
+		defensive, defensive_assert,
+		traits::{
+			Contains, EitherOf, EstimateNextSessionRotation, IsSubType, MapSuccess, NoOpPoll,
+			OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler,
+		},
+	};
 
 	/// Pallet prelude of `frame-system`.
 	#[doc(no_inline)]
 	pub use frame_system::pallet_prelude::*;
 
+	/// Transaction related helpers to submit transactions.
+	#[doc(no_inline)]
+	pub use frame_system::offchain::*;
+
 	/// All FRAME-relevant derive macros.
 	#[doc(no_inline)]
 	pub use super::derive::*;
@@ -216,16 +226,20 @@ pub mod prelude {
 	/// All hashing related things
 	pub use super::hashing::*;
 
+	/// All arithmetic types and traits used for safe math.
+	pub use super::arithmetic::*;
+
 	/// Runtime traits
 	#[doc(no_inline)]
 	pub use sp_runtime::traits::{
-		BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion,
-		Saturating, StaticLookup, TrailingZeroInput,
+		BlockNumberProvider, Bounded, Convert, DispatchInfoOf, Dispatchable, ReduceBy,
+		ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, TrailingZeroInput,
 	};
-
 	/// Other error/result types for runtime
 	#[doc(no_inline)]
-	pub use sp_runtime::{DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError};
+	pub use sp_runtime::{
+		BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError,
+	};
 }
 
 #[cfg(any(feature = "try-runtime", test))]
@@ -251,7 +265,7 @@ pub mod benchmarking {
 		pub use frame_benchmarking::benchmarking::*;
 		// The system origin, which is very often needed in benchmarking code. Might be tricky only
 		// if the pallet defines its own `#[pallet::origin]` and call it `RawOrigin`.
-		pub use frame_system::RawOrigin;
+		pub use frame_system::{Pallet as System, RawOrigin};
 	}
 
 	#[deprecated(
@@ -308,15 +322,18 @@ pub mod testing_prelude {
 	/// Other helper macros from `frame_support` that help with asserting in tests.
 	pub use frame_support::{
 		assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok,
-		assert_storage_noop, storage_alias,
+		assert_storage_noop, hypothetically, storage_alias,
 	};
 
 	pub use frame_system::{self, mocking::*};
 
-	#[deprecated(note = "Use `frame::testing_prelude::TestExternalities` instead.")]
+	#[deprecated(note = "Use `frame::testing_prelude::TestState` instead.")]
 	pub use sp_io::TestExternalities;
 
 	pub use sp_io::TestExternalities as TestState;
+
+	/// Commonly used runtime traits for testing.
+	pub use sp_runtime::{traits::BadOrigin, StateVersion};
 }
 
 /// All of the types and tools needed to build FRAME-based runtimes.
@@ -484,6 +501,7 @@ pub mod runtime {
 			frame_system::CheckEra<T>,
 			frame_system::CheckNonce<T>,
 			frame_system::CheckWeight<T>,
+			frame_system::WeightReclaim<T>,
 		);
 	}
 
@@ -493,7 +511,7 @@ pub mod runtime {
 	#[cfg(feature = "std")]
 	pub mod testing_prelude {
 		pub use sp_core::storage::Storage;
-		pub use sp_runtime::BuildStorage;
+		pub use sp_runtime::{BuildStorage, DispatchError};
 	}
 }
 
@@ -509,6 +527,8 @@ pub mod traits {
 }
 
 /// The arithmetic types used for safe math.
+///
+/// This is already part of the [`prelude`].
 pub mod arithmetic {
 	pub use sp_arithmetic::{traits::*, *};
 }
diff --git a/substrate/frame/support/procedural/src/pallet/expand/config.rs b/substrate/frame/support/procedural/src/pallet/expand/config.rs
index 0a583f1359bac9dcd7682e3c8c6a4de4eaca9ba9..d39f276723600fe7ff5ca36306800bee87c68766 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/config.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/config.rs
@@ -126,7 +126,7 @@ pub fn expand_config_metadata(def: &Def) -> proc_macro2::TokenStream {
 				ty: #frame_support::__private::scale_info::meta_type::<
 						<T as Config #trait_use_gen>::#ident
 					>(),
-				docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ],
+				docs: #frame_support::__private::vec![ #( #doc ),* ],
 			}
 		})
 	});
@@ -136,9 +136,9 @@ pub fn expand_config_metadata(def: &Def) -> proc_macro2::TokenStream {
 
 			#[doc(hidden)]
 			pub fn pallet_associated_types_metadata()
-				-> #frame_support::__private::sp_std::vec::Vec<#frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR>
+				-> #frame_support::__private::vec::Vec<#frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR>
 			{
-				#frame_support::__private::sp_std::vec![ #( #types ),* ]
+				#frame_support::__private::vec![ #( #types ),* ]
 			}
 		}
 	)
diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs
index 483a3dce77f6a8b510f9c36b7d74805103ae8d8a..990996830030ce1a12ea165ffbe77052661e6edc 100644
--- a/substrate/frame/support/src/dispatch.rs
+++ b/substrate/frame/support/src/dispatch.rs
@@ -308,6 +308,19 @@ impl PostDispatchInfo {
 	/// Calculate how much weight was actually spent by the `Dispatchable`.
 	pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight {
 		if let Some(actual_weight) = self.actual_weight {
+			let info_total_weight = info.total_weight();
+			if actual_weight.any_gt(info_total_weight) {
+				log::error!(
+					target: crate::LOG_TARGET,
+					"Post dispatch weight is greater than pre dispatch weight. \
+					Pre dispatch weight may be underestimating the actual weight. \
+					Greater post dispatch weight components are ignored. \
+					Pre dispatch weight: {:?}, \
+					Post dispatch weight: {:?}",
+					info_total_weight,
+					actual_weight,
+				);
+			}
 			actual_weight.min(info.total_weight())
 		} else {
 			info.total_weight()
diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs
index b34c6bdada3d484b00745dd02154af849c124c0a..42776e71cb883db750ade8ce5a692e0f84b37e9a 100644
--- a/substrate/frame/support/src/dispatch_context.rs
+++ b/substrate/frame/support/src/dispatch_context.rs
@@ -140,7 +140,7 @@ impl<T> Value<'_, T> {
 
 /// Runs the given `callback` in the dispatch context and gives access to some user defined value.
 ///
-/// Passes the a mutable reference of [`Value`] to the callback. The value will be of type `T` and
+/// Passes a mutable reference of [`Value`] to the callback. The value will be of type `T` and
 /// is identified using the [`TypeId`] of `T`. This means that `T` should be some unique type to
 /// make the value unique. If no value is set yet [`Value::get()`] and [`Value::get_mut()`] will
 /// return `None`. It is totally valid to have some `T` that is shared between different callers to
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index c64987b17d35c3994d8eb97fe6c841c09cbf7ea8..a6969260e6a26bedef86f55e7b27689978891aa7 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -44,6 +44,7 @@ pub mod __private {
 	pub use alloc::{
 		boxed::Box,
 		rc::Rc,
+		string::String,
 		vec,
 		vec::{IntoIter, Vec},
 	};
@@ -502,9 +503,9 @@ macro_rules! runtime_print {
 	($($arg:tt)+) => {
 		{
 			use core::fmt::Write;
-			let mut w = $crate::__private::sp_std::Writer::default();
-			let _ = core::write!(&mut w, $($arg)+);
-			$crate::__private::sp_io::misc::print_utf8(&w.inner())
+			let mut msg = $crate::__private::String::default();
+			let _ = core::write!(&mut msg, $($arg)+);
+			$crate::__private::sp_io::misc::print_utf8(msg.as_bytes())
 		}
 	}
 }
diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs
index 5ebba269365851b488b8af4eacc3d498c874c2b2..7109e9213b0f6dc46c7164c28868695404e1bb49 100644
--- a/substrate/frame/support/src/storage/child.rs
+++ b/substrate/frame/support/src/storage/child.rs
@@ -163,7 +163,7 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageRe
 /// operating on the same prefix should pass `Some` and this value should be equal to the
 /// previous call result's `maybe_cursor` field. The only exception to this is when you can
 /// guarantee that the subsequent call is in a new block; in this case the previous call's result
-/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
+/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
 /// then making this call solely from a block-hook such as `on_initialize`.
 
 /// Returns [`MultiRemovalResults`] to inform about the result. Once the resultant `maybe_cursor`
diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs
index 7f9bc93d7d818ff5f025617e721c3f79587347cb..495c50caa2d6c121331112cd9a96d8c02f69a69b 100644
--- a/substrate/frame/support/src/storage/unhashed.rs
+++ b/substrate/frame/support/src/storage/unhashed.rs
@@ -124,7 +124,7 @@ pub fn kill_prefix(prefix: &[u8], limit: Option<u32>) -> sp_io::KillStorageResul
 /// operating on the same prefix should pass `Some` and this value should be equal to the
 /// previous call result's `maybe_cursor` field. The only exception to this is when you can
 /// guarantee that the subsequent call is in a new block; in this case the previous call's result
-/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
+/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
 /// then making this call solely from a block-hook such as `on_initialize`.
 ///
 /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index 0dc3abdce956c1653e3bdc081585e2e6549ace5d..9fef4383ad67c5930db3d299889bd59a8f0336d1 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -66,7 +66,7 @@ impl<T: VariantCount> Get<u32> for VariantCountOf<T> {
 #[macro_export]
 macro_rules! defensive {
 	() => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR
@@ -74,7 +74,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR);
 	};
 	($error:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
@@ -83,7 +83,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}: {:?}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR, $error);
 	};
 	($error:expr, $proof:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs
index 80020d8d00809af048334d8614e8d7f8953c6525..6e46a7489654ee61b76bf3da219fcafec4333acd 100644
--- a/substrate/frame/support/src/traits/preimages.rs
+++ b/substrate/frame/support/src/traits/preimages.rs
@@ -38,7 +38,7 @@ pub enum Bounded<T, H: Hash> {
 	/// for transitioning from legacy state. In the future we will make this a pure
 	/// `Dummy` item storing only the final `dummy` field.
 	Legacy { hash: H::Output, dummy: core::marker::PhantomData<T> },
-	/// A an bounded `Call`. Its encoding must be at most 128 bytes.
+	/// A bounded `Call`. Its encoding must be at most 128 bytes.
 	Inline(BoundedInline),
 	/// A hash of the call together with an upper limit for its size.`
 	Lookup { hash: H::Output, len: u32 },
diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml
index 1340b2c55c5323d23d0efb848f80e845c737987e..8883ebd4c41df9cfd9809253ef659ae584fe5e5e 100644
--- a/substrate/frame/system/Cargo.toml
+++ b/substrate/frame/system/Cargo.toml
@@ -26,7 +26,6 @@ serde = { features = ["alloc", "derive"], workspace = true }
 sp-core = { features = ["serde"], workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { features = ["serde"], workspace = true }
-sp-std = { workspace = true }
 sp-version = { features = ["serde"], workspace = true }
 sp-weights = { features = ["serde"], workspace = true }
 
@@ -47,7 +46,6 @@ std = [
 	"sp-externalities/std",
 	"sp-io/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"sp-version/std",
 	"sp-weights/std",
 ]
diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs
index 01e4687bc4bceecd21f9d9c097e65db4f7305105..25d6ea03557887d1554a2a417d781aaa2c79bee7 100644
--- a/substrate/frame/system/benchmarking/src/extensions.rs
+++ b/substrate/frame/system/benchmarking/src/extensions.rs
@@ -29,7 +29,7 @@ use frame_support::{
 use frame_system::{
 	pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce,
 	CheckSpecVersion, CheckTxVersion, CheckWeight, Config, ExtensionsWeightInfo, Pallet as System,
-	RawOrigin,
+	RawOrigin, WeightReclaim,
 };
 use sp_runtime::{
 	generic::Era,
@@ -254,5 +254,49 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn weight_reclaim() -> Result<(), BenchmarkError> {
+		let caller = account("caller", 0, 0);
+		let base_extrinsic = <T as frame_system::Config>::BlockWeights::get()
+			.get(DispatchClass::Normal)
+			.base_extrinsic;
+		let extension_weight = <T as frame_system::Config>::ExtensionsWeightInfo::weight_reclaim();
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0),
+			extension_weight,
+			class: DispatchClass::Normal,
+			..Default::default()
+		};
+		let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into();
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)),
+			pays_fee: Default::default(),
+		};
+		let len = 0_usize;
+		let ext = WeightReclaim::<T>::new();
+
+		let initial_block_weight = Weight::from_parts(base_extrinsic.ref_time() * 2, 0);
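+		// Pre-fill the block weight as if the extrinsic had already been noted with its full
+		// pre-dispatch weight, so the benchmarked extension has something to reclaim.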
+		frame_system::BlockWeight::<T>::mutate(|current_weight| {
+			current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+			current_weight.set(initial_block_weight, DispatchClass::Normal);
+			current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+		});
+
+		#[block]
+		{
+			ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info))
+				.unwrap()
+				.unwrap();
+		}
+
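+		// After reclaiming, the block should only account for the weight actually spent
+		// (plus the extension's own weight) rather than the pre-dispatch estimate.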
+		assert_eq!(
+			System::<T>::block_weight().total(),
+			initial_block_weight +
+				base_extrinsic +
+				post_info.actual_weight.unwrap().saturating_add(extension_weight),
+		);
+		Ok(())
+	}
+
 	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,);
 }
diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs
index 6b126619ce5bfab57882868e19c936cbdec0eb27..61b5b885ec623f190c4bc618e033517b1e74f2ac 100644
--- a/substrate/frame/system/benchmarking/src/mock.rs
+++ b/substrate/frame/system/benchmarking/src/mock.rs
@@ -65,6 +65,10 @@ impl frame_system::ExtensionsWeightInfo for MockWeights {
 	fn check_weight() -> Weight {
 		Weight::from_parts(10, 0)
 	}
+
+	fn weight_reclaim() -> Weight {
+		Weight::from_parts(10, 0)
+	}
 }
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs
index 577e2b324fca5b981b58dd794aad28c7d67cdc3b..978eebaf3dace5542991573c942f222015148d51 100644
--- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs
+++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs
@@ -86,7 +86,7 @@ mod tests {
 	use crate::mock::{new_test_ext, Test, CALL};
 	use frame_support::{assert_ok, dispatch::DispatchInfo};
 	use sp_runtime::{
-		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction},
+		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication},
 		transaction_validity::{TransactionSource::External, TransactionValidityError},
 	};
 
@@ -118,7 +118,7 @@ mod tests {
 			let info = DispatchInfo::default();
 			let len = 0_usize;
 			let (_, _, origin) = CheckNonZeroSender::<Test>::new()
-				.validate(None.into(), CALL, &info, len, (), CALL, External)
+				.validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			assert!(!origin.is_transaction_authorized());
 		})
diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs
index 004ec08a26f26f1b480d11f232a532bc4d8d269c..bc19a09e06a9e0d567441d066218b69377401c01 100644
--- a/substrate/frame/system/src/extensions/check_nonce.rs
+++ b/substrate/frame/system/src/extensions/check_nonce.rs
@@ -186,7 +186,7 @@ mod tests {
 		assert_ok, assert_storage_noop, dispatch::GetDispatchInfo, traits::OriginTrait,
 	};
 	use sp_runtime::{
-		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction},
+		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication},
 		transaction_validity::TransactionSource::External,
 	};
 
@@ -335,7 +335,7 @@ mod tests {
 			let info = DispatchInfo::default();
 			let len = 0_usize;
 			let (_, val, origin) = CheckNonce::<Test>(1u64.into())
-				.validate(None.into(), CALL, &info, len, (), CALL, External)
+				.validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			assert!(!origin.is_transaction_authorized());
 			assert_ok!(CheckNonce::<Test>(1u64.into()).prepare(val, &origin, CALL, &info, len));
@@ -359,7 +359,7 @@ mod tests {
 			let len = 0_usize;
 			// run the validation step
 			let (_, val, origin) = CheckNonce::<Test>(1u64.into())
-				.validate(Some(1).into(), CALL, &info, len, (), CALL, External)
+				.validate(Some(1).into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			// mutate `AccountData` for the caller
 			crate::Account::<Test>::mutate(1, |info| {
diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs
index ee91478b90f314ddce0c7afffe5ab292abebc2ce..de0303defd0c3e8c9fc502d3017127e1c08c54ac 100644
--- a/substrate/frame/system/src/extensions/check_weight.rs
+++ b/substrate/frame/system/src/extensions/check_weight.rs
@@ -135,30 +135,12 @@ where
 		Ok(())
 	}
 
+	#[deprecated(note = "Use `frame_system::Pallet::reclaim_weight` instead.")]
 	pub fn do_post_dispatch(
 		info: &DispatchInfoOf<T::RuntimeCall>,
 		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
 	) -> Result<(), TransactionValidityError> {
-		let unspent = post_info.calc_unspent(info);
-		if unspent.any_gt(Weight::zero()) {
-			crate::BlockWeight::<T>::mutate(|current_weight| {
-				current_weight.reduce(unspent, info.class);
-			})
-		}
-
-		log::trace!(
-			target: LOG_TARGET,
-			"Used block weight: {:?}",
-			crate::BlockWeight::<T>::get(),
-		);
-
-		log::trace!(
-			target: LOG_TARGET,
-			"Used block length: {:?}",
-			Pallet::<T>::all_extrinsics_len(),
-		);
-
-		Ok(())
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
 	}
 }
 
@@ -279,8 +261,7 @@ where
 		_len: usize,
 		_result: &DispatchResult,
 	) -> Result<Weight, TransactionValidityError> {
-		Self::do_post_dispatch(info, post_info)?;
-		Ok(Weight::zero())
+		crate::Pallet::<T>::reclaim_weight(info, post_info).map(|()| Weight::zero())
 	}
 
 	fn bare_validate(
@@ -306,7 +287,7 @@ where
 		_len: usize,
 		_result: &DispatchResult,
 	) -> Result<(), TransactionValidityError> {
-		Self::do_post_dispatch(info, post_info)
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
 	}
 }
 
@@ -744,6 +725,121 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn extrinsic_already_refunded_more_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let accurate_refund = Weight::from_parts(510, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+			});
+
+			// Validate and prepare extrinsic
+			let pre = CheckWeight::<Test>(PhantomData)
+				.validate_and_prepare(Some(1).into(), CALL, &info, len, 0)
+				.unwrap()
+				.0;
+
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+
+			// Refund more accurately than the benchmark
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.reduce(accurate_refund, DispatchClass::Normal);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(accurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(CheckWeight::<Test>::post_dispatch_details(
+				pre,
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund is used
+			assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), accurate_refund);
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() - accurate_refund + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_less_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let inaccurate_refund = Weight::from_parts(110, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+			});
+
+			// Validate and prepare extrinsic
+			let pre = CheckWeight::<Test>(PhantomData)
+				.validate_and_prepare(Some(1).into(), CALL, &info, len, 0)
+				.unwrap()
+				.0;
+
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+
+			// Refund less accurately than the benchmark
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.reduce(inaccurate_refund, DispatchClass::Normal);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(inaccurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(CheckWeight::<Test>::post_dispatch_details(
+				pre,
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
 	#[test]
 	fn zero_weight_extrinsic_still_has_base_weight() {
 		new_test_ext().execute_with(|| {
diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs
index d79104d224035450f9ca69ba7d4b502b9ff0289d..66a8b17d30ae15b09a4af20a9dd703a52d18de9a 100644
--- a/substrate/frame/system/src/extensions/mod.rs
+++ b/substrate/frame/system/src/extensions/mod.rs
@@ -22,6 +22,7 @@ pub mod check_nonce;
 pub mod check_spec_version;
 pub mod check_tx_version;
 pub mod check_weight;
+pub mod weight_reclaim;
 pub mod weights;
 
 pub use weights::WeightInfo;
diff --git a/substrate/frame/system/src/extensions/weight_reclaim.rs b/substrate/frame/system/src/extensions/weight_reclaim.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0c37422a843bb7595497e8c2cc939ac39dee1c5e
--- /dev/null
+++ b/substrate/frame/system/src/extensions/weight_reclaim.rs
@@ -0,0 +1,401 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::Config;
+use codec::{Decode, Encode};
+use frame_support::dispatch::{DispatchInfo, PostDispatchInfo};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{
+		DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, ValidateResult,
+	},
+	transaction_validity::{TransactionSource, TransactionValidityError, ValidTransaction},
+	DispatchResult,
+};
+use sp_weights::Weight;
+
+/// Reclaim the unused weight using the post dispatch information.
+///
+/// After the dispatch of the extrinsic, calculate the unused weight using the post dispatch
+/// information and update the block consumed weight according to the newly calculated extrinsic
+/// weight.
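+///
+/// It is intended to be registered as the last element of the runtime's `TxExtension` tuple
+/// (as done for `frame_system::WeightReclaim<T>` in this patch), so it runs after the other
+/// extensions have accounted for their weight.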
+#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct WeightReclaim<T: Config + Send + Sync>(core::marker::PhantomData<T>);
+
+impl<T: Config + Send + Sync> WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	/// Creates a new `TransactionExtension` to recalculate the extrinsic weight after dispatch.
+	pub fn new() -> Self {
+		Self(Default::default())
+	}
+}
+
+impl<T: Config + Send + Sync> TransactionExtension<T::RuntimeCall> for WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	const IDENTIFIER: &'static str = "WeightReclaim";
+	type Implicit = ();
+	type Pre = ();
+	type Val = ();
+
+	fn weight(&self, _: &T::RuntimeCall) -> Weight {
+		<T::ExtensionsWeightInfo as super::WeightInfo>::weight_reclaim()
+	}
+
+	fn validate(
+		&self,
+		origin: T::RuntimeOrigin,
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_self_implicit: Self::Implicit,
+		_inherited_implication: &impl Encode,
+		_source: TransactionSource,
+	) -> ValidateResult<Self::Val, T::RuntimeCall> {
+		Ok((ValidTransaction::default(), (), origin))
+	}
+
+	fn prepare(
+		self,
+		_val: Self::Val,
+		_origin: &T::RuntimeOrigin,
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		Ok(())
+	}
+
+	fn post_dispatch_details(
+		_pre: Self::Pre,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		crate::Pallet::<T>::reclaim_weight(info, post_info).map(|()| Weight::zero())
+	}
+
+	fn bare_validate(
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> frame_support::pallet_prelude::TransactionValidity {
+		Ok(ValidTransaction::default())
+	}
+
+	fn bare_validate_and_prepare(
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> Result<(), TransactionValidityError> {
+		Ok(())
+	}
+
+	fn bare_post_dispatch(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
+	}
+}
+
+impl<T: Config + Send + Sync> core::fmt::Debug for WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+		write!(f, "{}", Self::IDENTIFIER)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::{
+		mock::{new_test_ext, Test},
+		BlockWeight, DispatchClass,
+	};
+	use frame_support::{assert_ok, weights::Weight};
+
+	fn block_weights() -> crate::limits::BlockWeights {
+		<Test as crate::Config>::BlockWeights::get()
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_more_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let accurate_refund = Weight::from_parts(510, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(
+					base_extrinsic + info.total_weight() - accurate_refund,
+					DispatchClass::Normal,
+				);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(accurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund is used
+			assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), accurate_refund);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				info.total_weight() - accurate_refund + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_less_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let inaccurate_refund = Weight::from_parts(110, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(
+					base_extrinsic + info.total_weight() - inaccurate_refund,
+					DispatchClass::Normal,
+				);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(inaccurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_not_refunded_before() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn no_actual_post_dispatch_weight() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() };
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn different_dispatch_class() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info = DispatchInfo {
+				call_weight: Weight::from_parts(512, 0),
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Operational).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Operational);
+				current_weight
+					.accrue(base_extrinsic + info.total_weight(), DispatchClass::Operational);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Operational),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn bare_also_works() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info = DispatchInfo {
+				call_weight: Weight::from_parts(512, 0),
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Operational).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Operational);
+				current_weight
+					.accrue(base_extrinsic + info.total_weight(), DispatchClass::Operational);
+			});
+
+			// Do the bare post dispatch
+			assert_ok!(WeightReclaim::<Test>::bare_post_dispatch(
+				&info,
+				&mut post_info.clone(),
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Operational),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+}
diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs
index b3c296899be52e8d308dd48fdeb662327be26dfb..670bb9a0e6fab2165cc29e04d757300a2eeae4a4 100644
--- a/substrate/frame/system/src/extensions/weights.rs
+++ b/substrate/frame/system/src/extensions/weights.rs
@@ -59,6 +59,7 @@ pub trait WeightInfo {
 	fn check_spec_version() -> Weight;
 	fn check_tx_version() -> Weight;
 	fn check_weight() -> Weight;
+	fn weight_reclaim() -> Weight;
 }
 
 /// Weights for `frame_system_extensions` using the Substrate node and recommended hardware.
@@ -133,6 +134,17 @@ impl<T: crate::Config> WeightInfo for SubstrateWeight<T> {
 		// Minimum execution time: 2_887_000 picoseconds.
 		Weight::from_parts(3_006_000, 0)
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1489`
+		// Minimum execution time: 4_375_000 picoseconds.
+		Weight::from_parts(4_747_000, 1489)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
 
 // For backwards compatibility and tests.
@@ -206,4 +218,15 @@ impl WeightInfo for () {
 		// Minimum execution time: 2_887_000 picoseconds.
 		Weight::from_parts(3_006_000, 0)
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1489`
+		// Minimum execution time: 4_375_000 picoseconds.
+		Weight::from_parts(4_747_000, 1489)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 862fb4cf9faf8c23c57bbbc713894d6e31325155..894e1898ed15523576eef010c211a3f0f79dece5 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -120,8 +120,6 @@ use sp_runtime::{
 	},
 	DispatchError, RuntimeDebug,
 };
-#[cfg(any(feature = "std", test))]
-use sp_std::map;
 use sp_version::RuntimeVersion;
 
 use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen};
@@ -146,6 +144,10 @@ use frame_support::{
 };
 use scale_info::TypeInfo;
 use sp_core::storage::well_known_keys;
+use sp_runtime::{
+	traits::{DispatchInfoOf, PostDispatchInfoOf},
+	transaction_validity::TransactionValidityError,
+};
 use sp_weights::{RuntimeDbWeight, Weight};
 
 #[cfg(any(feature = "std", test))]
@@ -170,7 +172,7 @@ pub use extensions::{
 	check_genesis::CheckGenesis, check_mortality::CheckMortality,
 	check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce,
 	check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion,
-	check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo,
+	check_weight::CheckWeight, weight_reclaim::WeightReclaim, WeightInfo as ExtensionsWeightInfo,
 };
 // Backward compatible re-export.
 pub use extensions::check_mortality::CheckMortality as CheckEra;
@@ -1039,6 +1041,17 @@ pub mod pallet {
 	pub(super) type AuthorizedUpgrade<T: Config> =
 		StorageValue<_, CodeUpgradeAuthorization<T>, OptionQuery>;
 
+	/// The weight reclaimed for the extrinsic.
+	///
+	/// This information is available until the end of the extrinsic execution.
+	/// More precisely, it is removed in `note_applied_extrinsic`.
+	///
+	/// Any logic doing some post dispatch weight reduction must update this storage to avoid
+	/// reclaiming the same weight twice.
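+	///
+	/// As an illustration (not normative), such logic can simply rely on
+	/// [`Pallet::reclaim_weight`], which compares the unspent weight against this value and only
+	/// reclaims the difference:
+	///
+	/// ```ignore
+	/// // Inside `post_dispatch_details` of a custom `TransactionExtension`:
+	/// frame_system::Pallet::<T>::reclaim_weight(info, post_info)?;
+	/// ```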
+	#[pallet::storage]
+	#[pallet::whitelist_storage]
+	pub type ExtrinsicWeightReclaimed<T: Config> = StorageValue<_, Weight, ValueQuery>;
+
 	#[derive(frame_support::DefaultNoBound)]
 	#[pallet::genesis_config]
 	pub struct GenesisConfig<T: Config> {
@@ -1905,12 +1918,14 @@ impl<T: Config> Pallet<T> {
 	#[cfg(any(feature = "std", test))]
 	pub fn externalities() -> TestExternalities {
 		TestExternalities::new(sp_core::storage::Storage {
-			top: map![
-				<BlockHash<T>>::hashed_key_for(BlockNumberFor::<T>::zero()) => [69u8; 32].encode(),
-				<Number<T>>::hashed_key().to_vec() => BlockNumberFor::<T>::one().encode(),
-				<ParentHash<T>>::hashed_key().to_vec() => [69u8; 32].encode()
-			],
-			children_default: map![],
+			top: [
+				(<BlockHash<T>>::hashed_key_for(BlockNumberFor::<T>::zero()), [69u8; 32].encode()),
+				(<Number<T>>::hashed_key().to_vec(), BlockNumberFor::<T>::one().encode()),
+				(<ParentHash<T>>::hashed_key().to_vec(), [69u8; 32].encode()),
+			]
+			.into_iter()
+			.collect(),
+			children_default: Default::default(),
 		})
 	}
 
@@ -2073,10 +2088,23 @@ impl<T: Config> Pallet<T> {
 			},
 		});
 
+		log::trace!(
+			target: LOG_TARGET,
+			"Used block weight: {:?}",
+			BlockWeight::<T>::get(),
+		);
+
+		log::trace!(
+			target: LOG_TARGET,
+			"Used block length: {:?}",
+			Pallet::<T>::all_extrinsics_len(),
+		);
+
 		let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32;
 
 		storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index);
 		ExecutionPhase::<T>::put(Phase::ApplyExtrinsic(next_extrinsic_index));
+		ExtrinsicWeightReclaimed::<T>::kill();
 	}
 
 	/// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block
@@ -2174,6 +2202,32 @@ impl<T: Config> Pallet<T> {
 		}
 		Ok(actual_hash)
 	}
+
+	/// Reclaim the weight for the extrinsic, given its dispatch info and post dispatch info.
+	///
+	/// This function checks the weight that has already been reclaimed and reclaims more if the
+	/// difference between the pre dispatch and post dispatch weight is greater.
+	pub fn reclaim_weight(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+	) -> Result<(), TransactionValidityError>
+	where
+		T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+	{
+		let already_reclaimed = crate::ExtrinsicWeightReclaimed::<T>::get();
+		let unspent = post_info.calc_unspent(info);
+		let accurate_reclaim = already_reclaimed.max(unspent);
+		// Saturation never happens: we took the maximum above.
+		let to_reclaim_more = accurate_reclaim.saturating_sub(already_reclaimed);
+		if to_reclaim_more != Weight::zero() {
+			crate::BlockWeight::<T>::mutate(|current_weight| {
+				current_weight.reduce(to_reclaim_more, info.class);
+			});
+			crate::ExtrinsicWeightReclaimed::<T>::put(accurate_reclaim);
+		}
+
+		Ok(())
+	}
 }
 
 /// Returns a 32 byte datum which is guaranteed to be universally unique. `entropy` is provided
diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs
index 6b903f5b7e79eccbeacc6180e2828ea74897f72d..6415380b2848564cf3c3ed4a95642d147db539c9 100644
--- a/substrate/frame/system/src/tests.rs
+++ b/substrate/frame/system/src/tests.rs
@@ -892,3 +892,67 @@ fn test_default_account_nonce() {
 		assert_eq!(System::account_nonce(&1), 5u64.into());
 	});
 }
+
+#[test]
+fn extrinsic_weight_refunded_is_cleaned() {
+	new_test_ext().execute_with(|| {
+		crate::ExtrinsicWeightReclaimed::<Test>::put(Weight::from_parts(1, 2));
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(1, 2));
+		System::note_applied_extrinsic(&Ok(().into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+
+		crate::ExtrinsicWeightReclaimed::<Test>::put(Weight::from_parts(1, 2));
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(1, 2));
+		System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+	});
+}
+
+#[test]
+fn reclaim_works() {
+	new_test_ext().execute_with(|| {
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 200), ..Default::default() };
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 100)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(50, 100));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(25, 200)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 100));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(300, 50)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 150));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(300, 300)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 150));
+
+		System::note_applied_extrinsic(&Ok(().into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+	});
+}
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
index dd907f6fcbb74eacfeced2a25222122177b37532..5ba1d1297679eb6cc6b0d905a70a98d57b99a563 100644
--- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
@@ -46,7 +46,8 @@ use frame_support::{
 use scale_info::{StaticTypeInfo, TypeInfo};
 use sp_runtime::{
 	traits::{
-		DispatchInfoOf, DispatchOriginOf, PostDispatchInfoOf, TransactionExtension, ValidateResult,
+		DispatchInfoOf, DispatchOriginOf, Implication, PostDispatchInfoOf, TransactionExtension,
+		ValidateResult,
 	},
 	transaction_validity::TransactionValidityError,
 };
@@ -147,7 +148,7 @@ where
 		info: &DispatchInfoOf<T::RuntimeCall>,
 		len: usize,
 		self_implicit: S::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> ValidateResult<Self::Val, T::RuntimeCall> {
 		if call.is_feeless(&origin) {
diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml
index 135292fb4ecd53e56ad3db4afada1bc41d440c61..a2473c51ee75cc687c4a8e8bed9f9aa7cf6fdfee 100644
--- a/substrate/frame/uniques/Cargo.toml
+++ b/substrate/frame/uniques/Cargo.toml
@@ -28,7 +28,6 @@ sp-runtime = { workspace = true }
 pallet-balances = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index b412d4b52fed5208d0d2fe8ace56977822b3b55f..8909d2b2e48617d48d9a8ebd39fc25a23e7d3d6f 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -666,7 +666,7 @@ pub struct CallApiAtParams<'a, Block: BlockT> {
 	pub extensions: &'a RefCell<Extensions>,
 }
 
-/// Something that can call into the an api at a given block.
+/// Something that can call into an api at a given block.
 #[cfg(feature = "std")]
 pub trait CallApiAt<Block: BlockT> {
 	/// The state backend that is used to store the block states.
diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
index 1842b1631621a9158f09422cb744dc54d27df935..dec818598472334214ba7e8b228530d5259583d0 100644
--- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
+++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
@@ -85,7 +85,6 @@ where
 		match self.format {
 			ExtrinsicFormat::Bare => {
 				let inherent_validation = I::validate_unsigned(source, &self.function)?;
-				#[allow(deprecated)]
 				let legacy_validation = Extension::bare_validate(&self.function, info, len)?;
 				Ok(legacy_validation.combine_with(inherent_validation))
 			},
diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
index d8510a60a78938e70c5f6d954a58ea5de60d5154..6b8471f848466e7c1d7efa8e534c3a9af3b1887b 100644
--- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
+++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
@@ -683,7 +683,7 @@ mod legacy {
 		Extra: Encode,
 	{
 		fn encode(&self) -> Vec<u8> {
-			let mut tmp = Vec::with_capacity(sp_std::mem::size_of::<Self>());
+			let mut tmp = Vec::with_capacity(core::mem::size_of::<Self>());
 
 			// 1 byte version id.
 			match self.signature.as_ref() {
diff --git a/substrate/primitives/runtime/src/proving_trie/base16.rs b/substrate/primitives/runtime/src/proving_trie/base16.rs
index da05c551c6d9478fd393a9daefdc2398c662cbae..abdf6ed178bb8fbd7f4b89c24cf638ddf364515d 100644
--- a/substrate/primitives/runtime/src/proving_trie/base16.rs
+++ b/substrate/primitives/runtime/src/proving_trie/base16.rs
@@ -26,8 +26,8 @@
 
 use super::{ProofToHashes, ProvingTrie, TrieError};
 use crate::{Decode, DispatchError, Encode};
+use alloc::vec::Vec;
 use codec::MaxEncodedLen;
-use sp_std::vec::Vec;
 use sp_trie::{
 	trie_types::{TrieDBBuilder, TrieDBMutBuilderV1},
 	LayoutV1, MemoryDB, Trie, TrieMut,
@@ -197,7 +197,7 @@ mod tests {
 	use super::*;
 	use crate::traits::BlakeTwo256;
 	use sp_core::H256;
-	use sp_std::collections::btree_map::BTreeMap;
+	use std::collections::BTreeMap;
 
 	// A trie which simulates a trie of accounts (u32) and balances (u128).
 	type BalanceTrie = BasicProvingTrie<BlakeTwo256, u32, u128>;
diff --git a/substrate/primitives/runtime/src/proving_trie/base2.rs b/substrate/primitives/runtime/src/proving_trie/base2.rs
index 2b14a59ab056c87dca2352a7824b9f740945e897..8a7cfaa5149df9e5a78fd60f285eecb7022141cf 100644
--- a/substrate/primitives/runtime/src/proving_trie/base2.rs
+++ b/substrate/primitives/runtime/src/proving_trie/base2.rs
@@ -22,9 +22,9 @@
 
 use super::{ProofToHashes, ProvingTrie, TrieError};
 use crate::{Decode, DispatchError, Encode};
+use alloc::{collections::BTreeMap, vec::Vec};
 use binary_merkle_tree::{merkle_proof, merkle_root, MerkleProof};
 use codec::MaxEncodedLen;
-use sp_std::{collections::btree_map::BTreeMap, vec::Vec};
 
 /// A helper structure for building a basic base-2 merkle trie and creating compact proofs for that
 /// trie.
@@ -161,7 +161,7 @@ mod tests {
 	use super::*;
 	use crate::traits::BlakeTwo256;
 	use sp_core::H256;
-	use sp_std::collections::btree_map::BTreeMap;
+	use std::collections::BTreeMap;
 
 	// A trie which simulates a trie of accounts (u32) and balances (u128).
 	type BalanceTrie = BasicProvingTrie<BlakeTwo256, u32, u128>;
diff --git a/substrate/primitives/runtime/src/proving_trie/mod.rs b/substrate/primitives/runtime/src/proving_trie/mod.rs
index 009aa6d4935fd9231c57a5425365bebdc72411c1..32b2284b4d79d9e7481e8231cda3dfa454c84ffc 100644
--- a/substrate/primitives/runtime/src/proving_trie/mod.rs
+++ b/substrate/primitives/runtime/src/proving_trie/mod.rs
@@ -23,7 +23,7 @@ pub mod base2;
 use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo};
 #[cfg(feature = "serde")]
 use crate::{Deserialize, Serialize};
-use sp_std::vec::Vec;
+use alloc::vec::Vec;
 use sp_trie::{trie_types::TrieError as SpTrieError, VerifyError};
 
 /// A runtime friendly error type for tries.
diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs
index 79984b13567250fe7c513c84f28e1276d1cc7ec8..ec5251d978f13a6f5dc39086bf875b1a96fcb85a 100644
--- a/substrate/primitives/runtime/src/runtime_logger.rs
+++ b/substrate/primitives/runtime/src/runtime_logger.rs
@@ -54,10 +54,10 @@ impl log::Log for RuntimeLogger {
 
 	fn log(&self, record: &log::Record) {
 		use core::fmt::Write;
-		let mut w = sp_std::Writer::default();
-		let _ = ::core::write!(&mut w, "{}", record.args());
+		let mut msg = alloc::string::String::default();
+		let _ = ::core::write!(&mut msg, "{}", record.args());
 
-		sp_io::logging::log(record.level().into(), record.target(), w.inner());
+		sp_io::logging::log(record.level().into(), record.target(), msg.as_bytes());
 	}
 
 	fn flush(&self) {}
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index cfcc3e5a354d6d3fbf513f0ad09e6480c1c1a349..8f5b484e4e3f5b84401ec5c0567bbfc83a853bf3 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -55,7 +55,8 @@ use std::str::FromStr;
 
 pub mod transaction_extension;
 pub use transaction_extension::{
-	DispatchTransaction, TransactionExtension, TransactionExtensionMetadata, ValidateResult,
+	DispatchTransaction, Implication, ImplicationParts, TransactionExtension,
+	TransactionExtensionMetadata, TxBaseImplication, ValidateResult,
 };
 
 /// A lazy value.
@@ -1709,7 +1710,7 @@ pub trait SignedExtension:
 	/// This method provides a default implementation that returns a vec containing a single
 	/// [`TransactionExtensionMetadata`].
 	fn metadata() -> Vec<TransactionExtensionMetadata> {
-		sp_std::vec![TransactionExtensionMetadata {
+		alloc::vec![TransactionExtensionMetadata {
 			identifier: Self::IDENTIFIER,
 			ty: scale_info::meta_type::<Self>(),
 			implicit: scale_info::meta_type::<Self::AdditionalSigned>()
@@ -1962,7 +1963,7 @@ pub trait AccountIdConversion<AccountId>: Sized {
 		Self::try_from_sub_account::<()>(a).map(|x| x.0)
 	}
 
-	/// Convert this value amalgamated with the a secondary "sub" value into an account ID,
+	/// Convert this value amalgamated with a secondary "sub" value into an account ID,
 	/// truncating any unused bytes. This is infallible.
 	///
 	/// NOTE: The account IDs from this and from `into_account` are *not* guaranteed to be distinct
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
index 28030d12fc9f3c390eddb492c2c88c37a5a2ed77..1fbaab0d45ac13bac0192383dcfcb7c942346ba7 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
@@ -111,7 +111,7 @@ where
 			info,
 			len,
 			self.implicit()?,
-			&(extension_version, call),
+			&TxBaseImplication((extension_version, call)),
 			source,
 		) {
 			// After validation, some origin must have been authorized.
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
index f8c5dc6a724eb9e6f61935eff675af1e3096eb01..15be1e4c8e0a23bdd8f2dec892a3ad3f07808222 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
@@ -24,11 +24,12 @@ use crate::{
 	},
 	DispatchResult,
 };
+use alloc::vec::Vec;
 use codec::{Codec, Decode, Encode};
-use impl_trait_for_tuples::impl_for_tuples;
+use core::fmt::Debug;
 #[doc(hidden)]
-pub use sp_std::marker::PhantomData;
-use sp_std::{self, fmt::Debug, prelude::*};
+pub use core::marker::PhantomData;
+use impl_trait_for_tuples::impl_for_tuples;
 use sp_weights::Weight;
 use tuplex::{PopFront, PushBack};
 
@@ -43,6 +44,72 @@ mod dispatch_transaction;
 pub use as_transaction_extension::AsTransactionExtension;
 pub use dispatch_transaction::DispatchTransaction;
 
+/// Provides `Sealed` trait.
+mod private {
+	/// Special trait that prevents the implementation of some traits outside of this crate.
+	pub trait Sealed {}
+}
+
+/// The base implication in a transaction.
+///
+/// This struct represents the base implication of a transaction, that is, the part of the
+/// implication that does not come from any transaction extension. It usually comprises the call
+/// and the transaction extension version.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
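+///
+/// As an illustrative example, the dispatch logic in this change wraps the extension version and
+/// the call as the base implication before running the validation pipeline:
+///
+/// ```ignore
+/// // `extension_version` and `call` come from the transaction being validated.
+/// let inherited_implication = TxBaseImplication((extension_version, call));
+/// ```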
+#[derive(Encode)]
+pub struct TxBaseImplication<T>(pub T);
+
+impl<T: Encode> Implication for TxBaseImplication<T> {
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> {
+		ImplicationParts { base: self, explicit: &(), implicit: &() }
+	}
+}
+
+impl<T> private::Sealed for TxBaseImplication<T> {}
+
+/// The implication in a transaction, split into its base, explicit and implicit parts.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
+#[derive(Encode)]
+pub struct ImplicationParts<Base, Explicit, Implicit> {
+	/// The base implication, i.e. the implication that is not part of any transaction extension;
+	/// usually the call and the transaction extension version.
+	pub base: Base,
+	/// The explicit implication in transaction extensions.
+	pub explicit: Explicit,
+	/// The implicit implication in transaction extensions.
+	pub implicit: Implicit,
+}
+
+impl<Base: Encode, Explicit: Encode, Implicit: Encode> Implication
+	for ImplicationParts<Base, Explicit, Implicit>
+{
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> {
+		ImplicationParts { base: &self.base, explicit: &self.explicit, implicit: &self.implicit }
+	}
+}
+
+impl<Base, Explicit, Implicit> private::Sealed for ImplicationParts<Base, Explicit, Implicit> {}
+
+/// Interface of implications in the transaction extension pipeline.
+///
+/// Implications can be encoded; this is useful for checking a signature over the implications.
+/// Implications can also be split into parts; this allows destructuring and restructuring the
+/// implications, which is useful for nested pipelines.
+///
+/// This trait is sealed; consider using the [`TxBaseImplication`] and [`ImplicationParts`]
+/// implementations.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
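+///
+/// As a sketch (mirroring the tuple implementation below), a nested pipeline rebuilds the
+/// implication seen by each element from the inherited parts plus the implications of the
+/// elements that follow it:
+///
+/// ```ignore
+/// let parts = inherited_implication.parts();
+/// let nested_implication = ImplicationParts {
+/// 	base: parts.base,
+/// 	explicit: (&following_explicit_implications, parts.explicit),
+/// 	implicit: (&following_implicit_implications, parts.implicit),
+/// };
+/// ```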
+pub trait Implication: Encode + private::Sealed {
+	/// Destructure the implication into its parts.
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode>;
+}
+
 /// Shortcut for the result value of the `validate` function.
 pub type ValidateResult<Val, Call> =
 	Result<(ValidTransaction, Val, DispatchOriginOf<Call>), TransactionValidityError>;
@@ -192,7 +259,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 	/// This method provides a default implementation that returns a vec containing a single
 	/// [`TransactionExtensionMetadata`].
 	fn metadata() -> Vec<TransactionExtensionMetadata> {
-		sp_std::vec![TransactionExtensionMetadata {
+		alloc::vec![TransactionExtensionMetadata {
 			identifier: Self::IDENTIFIER,
 			ty: scale_info::meta_type::<Self>(),
 			implicit: scale_info::meta_type::<Self::Implicit>()
@@ -244,7 +311,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 		info: &DispatchInfoOf<Call>,
 		len: usize,
 		self_implicit: Self::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> ValidateResult<Self::Val, Call>;
 
@@ -421,7 +488,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 #[macro_export]
 macro_rules! impl_tx_ext_default {
 	($call:ty ; , $( $rest:tt )*) => {
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; validate $( $rest:tt )*) => {
 		fn validate(
@@ -436,7 +503,7 @@ macro_rules! impl_tx_ext_default {
 		) -> $crate::traits::ValidateResult<Self::Val, $call> {
 			Ok((Default::default(), Default::default(), origin))
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; prepare $( $rest:tt )*) => {
 		fn prepare(
@@ -449,13 +516,13 @@ macro_rules! impl_tx_ext_default {
 		) -> Result<Self::Pre, $crate::transaction_validity::TransactionValidityError> {
 			Ok(Default::default())
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; weight $( $rest:tt )*) => {
 		fn weight(&self, _call: &$call) -> $crate::Weight {
 			$crate::Weight::zero()
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ;) => {};
 }
@@ -499,7 +566,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 		info: &DispatchInfoOf<Call>,
 		len: usize,
 		self_implicit: Self::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> Result<
 		(ValidTransaction, Self::Val, <Call as Dispatchable>::RuntimeOrigin),
@@ -510,23 +577,20 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 		let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) );
 		let following_implicit_implications = self_implicit;
 
+		let implication_parts = inherited_implication.parts();
+
 		for_tuples!(#(
 			// Implication of this pipeline element not relevant for later items, so we pop it.
 			let (_item, following_explicit_implications) = following_explicit_implications.pop_front();
 			let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front();
 			let (item_valid, item_val, origin) = {
-				let implications = (
-					// The first is the implications born of the fact we return the mutated
-					// origin.
-					inherited_implication,
-					// This is the explicitly made implication born of the fact the new origin is
-					// passed into the next items in this pipeline-tuple.
-					&following_explicit_implications,
-					// This is the implicitly made implication born of the fact the new origin is
-					// passed into the next items in this pipeline-tuple.
-					&following_implicit_implications,
-				);
-				Tuple.validate(origin, call, info, len, item_implicit, &implications, source)?
+				Tuple.validate(origin, call, info, len, item_implicit,
+					&ImplicationParts {
+						base: implication_parts.base,
+						explicit: (&following_explicit_implications, implication_parts.explicit),
+						implicit: (&following_implicit_implications, implication_parts.implicit),
+					},
+					source)?
 			};
 			let valid = valid.combine_with(item_valid);
 			let val = val.push_back(item_val);
@@ -605,7 +669,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 impl<Call: Dispatchable> TransactionExtension<Call> for () {
 	const IDENTIFIER: &'static str = "UnitTransactionExtension";
 	type Implicit = ();
-	fn implicit(&self) -> sp_std::result::Result<Self::Implicit, TransactionValidityError> {
+	fn implicit(&self) -> core::result::Result<Self::Implicit, TransactionValidityError> {
 		Ok(())
 	}
 	type Val = ();
@@ -620,7 +684,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for () {
 		_info: &DispatchInfoOf<Call>,
 		_len: usize,
 		_self_implicit: Self::Implicit,
-		_inherited_implication: &impl Encode,
+		_inherited_implication: &impl Implication,
 		_source: TransactionSource,
 	) -> Result<
 		(ValidTransaction, (), <Call as Dispatchable>::RuntimeOrigin),
@@ -639,3 +703,168 @@ impl<Call: Dispatchable> TransactionExtension<Call> for () {
 		Ok(())
 	}
 }
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn test_implications_on_nested_structure() {
+		use scale_info::TypeInfo;
+		use std::cell::RefCell;
+
+		#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)]
+		struct MockExtension {
+			also_implicit: u8,
+			explicit: u8,
+		}
+
+		const CALL_IMPLICIT: u8 = 23;
+
+		thread_local! {
+			static COUNTER: RefCell<u8> = RefCell::new(1);
+		}
+
+		impl TransactionExtension<()> for MockExtension {
+			const IDENTIFIER: &'static str = "MockExtension";
+			type Implicit = u8;
+			fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
+				Ok(self.also_implicit)
+			}
+			type Val = ();
+			type Pre = ();
+			fn weight(&self, _call: &()) -> Weight {
+				Weight::zero()
+			}
+			fn prepare(
+				self,
+				_val: Self::Val,
+				_origin: &DispatchOriginOf<()>,
+				_call: &(),
+				_info: &DispatchInfoOf<()>,
+				_len: usize,
+			) -> Result<Self::Pre, TransactionValidityError> {
+				Ok(())
+			}
+			fn validate(
+				&self,
+				origin: DispatchOriginOf<()>,
+				_call: &(),
+				_info: &DispatchInfoOf<()>,
+				_len: usize,
+				self_implicit: Self::Implicit,
+				inherited_implication: &impl Implication,
+				_source: TransactionSource,
+			) -> ValidateResult<Self::Val, ()> {
+				COUNTER.with(|c| {
+					let mut counter = c.borrow_mut();
+
+					assert_eq!(self_implicit, *counter);
+					assert_eq!(
+						self,
+						&MockExtension { also_implicit: *counter, explicit: *counter + 1 }
+					);
+
+					// The inherited implication must be: the call, then the following explicit
+					// implications (all values from `*counter + 2` to 22), then the following
+					// implicit implications (only the odd values in that range).
+					let mut assert_implications = Vec::new();
+					assert_implications.push(CALL_IMPLICIT);
+					for i in *counter + 2..23 {
+						assert_implications.push(i);
+					}
+					for i in *counter + 2..23 {
+						if i % 2 == 1 {
+							assert_implications.push(i);
+						}
+					}
+					assert_eq!(inherited_implication.encode(), assert_implications);
+
+					*counter += 2;
+				});
+				Ok((ValidTransaction::default(), (), origin))
+			}
+			fn post_dispatch_details(
+				_pre: Self::Pre,
+				_info: &DispatchInfoOf<()>,
+				_post_info: &PostDispatchInfoOf<()>,
+				_len: usize,
+				_result: &DispatchResult,
+			) -> Result<Weight, TransactionValidityError> {
+				Ok(Weight::zero())
+			}
+		}
+
+		// Test for one nested structure
+
+		let ext = (
+			MockExtension { also_implicit: 1, explicit: 2 },
+			MockExtension { also_implicit: 3, explicit: 4 },
+			(
+				MockExtension { also_implicit: 5, explicit: 6 },
+				MockExtension { also_implicit: 7, explicit: 8 },
+				(
+					MockExtension { also_implicit: 9, explicit: 10 },
+					MockExtension { also_implicit: 11, explicit: 12 },
+				),
+				MockExtension { also_implicit: 13, explicit: 14 },
+				MockExtension { also_implicit: 15, explicit: 16 },
+			),
+			MockExtension { also_implicit: 17, explicit: 18 },
+			(MockExtension { also_implicit: 19, explicit: 20 },),
+			MockExtension { also_implicit: 21, explicit: 22 },
+		);
+
+		let implicit = ext.implicit().unwrap();
+
+		let res = ext
+			.validate(
+				(),
+				&(),
+				&DispatchInfoOf::<()>::default(),
+				0,
+				implicit,
+				&TxBaseImplication(CALL_IMPLICIT),
+				TransactionSource::Local,
+			)
+			.expect("valid");
+
+		assert_eq!(res.0, ValidTransaction::default());
+
+		// Test for another nested structure
+
+		COUNTER.with(|c| {
+			*c.borrow_mut() = 1;
+		});
+
+		let ext = (
+			MockExtension { also_implicit: 1, explicit: 2 },
+			MockExtension { also_implicit: 3, explicit: 4 },
+			MockExtension { also_implicit: 5, explicit: 6 },
+			MockExtension { also_implicit: 7, explicit: 8 },
+			MockExtension { also_implicit: 9, explicit: 10 },
+			MockExtension { also_implicit: 11, explicit: 12 },
+			(
+				MockExtension { also_implicit: 13, explicit: 14 },
+				MockExtension { also_implicit: 15, explicit: 16 },
+				MockExtension { also_implicit: 17, explicit: 18 },
+				MockExtension { also_implicit: 19, explicit: 20 },
+				MockExtension { also_implicit: 21, explicit: 22 },
+			),
+		);
+
+		let implicit = ext.implicit().unwrap();
+
+		let res = ext
+			.validate(
+				(),
+				&(),
+				&DispatchInfoOf::<()>::default(),
+				0,
+				implicit,
+				&TxBaseImplication(CALL_IMPLICIT),
+				TransactionSource::Local,
+			)
+			.expect("valid");
+
+		assert_eq!(res.0, ValidTransaction::default());
+	}
+}
diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs
index 4b25f85fba68bd80a3c1040b45b213cfdad72563..df7570a185481c7c489b4dd07f027654410d509c 100644
--- a/substrate/primitives/storage/src/lib.rs
+++ b/substrate/primitives/storage/src/lib.rs
@@ -191,11 +191,15 @@ pub mod well_known_keys {
 	/// Wasm code of the runtime.
 	///
 	/// Stored as a raw byte vector. Required by substrate.
+	///
+	/// Encodes to `0x3A636F6465`.
 	pub const CODE: &[u8] = b":code";
 
 	/// Number of wasm linear memory pages required for execution of the runtime.
 	///
 	/// The type of this value is encoded `u64`.
+	///
+	/// Encodes to `0x3A686561707061676573`.
 	pub const HEAP_PAGES: &[u8] = b":heappages";
 
 	/// Current extrinsic index (u32) is stored under this key.
diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs
index 491086bef49792fe25eff1a88b910549cb12f893..49dc6ba035c9e929d172d67d39b7ee7a97ac2f07 100644
--- a/substrate/test-utils/runtime/src/extrinsic.rs
+++ b/substrate/test-utils/runtime/src/extrinsic.rs
@@ -212,6 +212,7 @@ impl ExtrinsicBuilder {
 				self.metadata_hash
 					.map(CheckMetadataHash::new_with_custom_hash)
 					.unwrap_or_else(|| CheckMetadataHash::new(false)),
+				frame_system::WeightReclaim::new(),
 			);
 			let raw_payload = SignedPayload::from_raw(
 				self.function.clone(),
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 666776865316be6c818fcc23b81937959d25b8b2..4d24354f99a7eb8f55e499205a8038eec0ccc4e0 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -155,6 +155,7 @@ pub type TxExtension = (
 	(CheckNonce<Runtime>, CheckWeight<Runtime>),
 	CheckSubstrateCall,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// The payload being signed in transactions.
 pub type SignedPayload = sp_runtime::generic::SignedPayload<RuntimeCall, TxExtension>;
diff --git a/substrate/utils/frame/omni-bencher/src/main.rs b/substrate/utils/frame/omni-bencher/src/main.rs
index 7d8aa891dc4a0c77f6a04f81988701f31e19dd6d..f0f9ab753b074e7084a702441ee9fadf8c7969fe 100644
--- a/substrate/utils/frame/omni-bencher/src/main.rs
+++ b/substrate/utils/frame/omni-bencher/src/main.rs
@@ -24,8 +24,6 @@ use tracing_subscriber::EnvFilter;
 fn main() -> Result<()> {
 	setup_logger();
 
-	log::warn!("The FRAME omni-bencher is not yet battle tested - double check the results.",);
-
 	command::Command::parse().run()
 }
 
diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index 75a2ac2aef41b7920a66107a96a654191d1d24f7..4c49663260bb6ec4f5bc27e55c47848dd3ce5eeb 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -20,6 +20,8 @@
 //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate
 //! based chain, or a local state snapshot file.
 
+mod logging;
+
 use codec::{Compact, Decode, Encode};
 use indicatif::{ProgressBar, ProgressStyle};
 use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient};
@@ -37,7 +39,6 @@ use sp_runtime::{
 	StateVersion,
 };
 use sp_state_machine::TestExternalities;
-use spinners::{Spinner, Spinners};
 use std::{
 	cmp::{max, min},
 	fs,
@@ -49,6 +50,8 @@ use std::{
 use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi};
 use tokio_retry::{strategy::FixedInterval, Retry};
 
+type Result<T, E = &'static str> = std::result::Result<T, E>;
+
 type KeyValue = (StorageKey, StorageData);
 type TopKeyValues = Vec<KeyValue>;
 type ChildKeyValues = Vec<(ChildInfo, Vec<KeyValue>)>;
@@ -87,7 +90,7 @@ impl<B: BlockT> Snapshot<B> {
 		}
 	}
 
-	fn load(path: &PathBuf) -> Result<Snapshot<B>, &'static str> {
+	fn load(path: &PathBuf) -> Result<Snapshot<B>> {
 		let bytes = fs::read(path).map_err(|_| "fs::read failed.")?;
 		// The first item in the SCALE encoded struct bytes is the snapshot version. We decode and
 		// check that first, before proceeding to decode the rest of the snapshot.
@@ -168,9 +171,9 @@ impl Transport {
 	}
 
 	// Build an HttpClient from a URI.
-	async fn init(&mut self) -> Result<(), &'static str> {
+	async fn init(&mut self) -> Result<()> {
 		if let Self::Uri(uri) = self {
-			log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri);
+			debug!(target: LOG_TARGET, "initializing remote client to {uri:?}");
 
 			// If we have a ws uri, try to convert it to an http uri.
 			// We use an HTTP client rather than WS because WS starts to choke with "accumulated
@@ -178,11 +181,11 @@ impl Transport {
 			// from a node running a default configuration.
 			let uri = if uri.starts_with("ws://") {
 				let uri = uri.replace("ws://", "http://");
-				log::info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri);
+				info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)");
 				uri
 			} else if uri.starts_with("wss://") {
 				let uri = uri.replace("wss://", "https://");
-				log::info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri);
+				info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)");
 				uri
 			} else {
 				uri.clone()
@@ -193,7 +196,7 @@ impl Transport {
 				.request_timeout(std::time::Duration::from_secs(60 * 5))
 				.build(uri)
 				.map_err(|e| {
-					log::error!(target: LOG_TARGET, "error: {:?}", e);
+					error!(target: LOG_TARGET, "error: {e:?}");
 					"failed to build http client"
 				})?;
 
@@ -364,23 +367,23 @@ where
 		&self,
 		key: StorageKey,
 		maybe_at: Option<B::Hash>,
-	) -> Result<Option<StorageData>, &'static str> {
+	) -> Result<Option<StorageData>> {
 		trace!(target: LOG_TARGET, "rpc: get_storage");
 		self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| {
-			error!(target: LOG_TARGET, "Error = {:?}", e);
+			error!(target: LOG_TARGET, "Error = {e:?}");
 			"rpc get_storage failed."
 		})
 	}
 
 	/// Get the latest finalized head.
-	async fn rpc_get_head(&self) -> Result<B::Hash, &'static str> {
+	async fn rpc_get_head(&self) -> Result<B::Hash> {
 		trace!(target: LOG_TARGET, "rpc: finalized_head");
 
 		// sadly this pretty much unreadable...
 		ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client())
 			.await
 			.map_err(|e| {
-				error!(target: LOG_TARGET, "Error = {:?}", e);
+				error!(target: LOG_TARGET, "Error = {e:?}");
 				"rpc finalized_head failed."
 			})
 	}
@@ -390,13 +393,13 @@ where
 		prefix: Option<StorageKey>,
 		start_key: Option<StorageKey>,
 		at: B::Hash,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		self.as_online()
 			.rpc_client()
 			.storage_keys_paged(prefix, Self::DEFAULT_KEY_DOWNLOAD_PAGE, start_key, Some(at))
 			.await
 			.map_err(|e| {
-				error!(target: LOG_TARGET, "Error = {:?}", e);
+				error!(target: LOG_TARGET, "Error = {e:?}");
 				"rpc get_keys failed"
 			})
 	}
@@ -407,7 +410,7 @@ where
 		prefix: &StorageKey,
 		block: B::Hash,
 		parallel: usize,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		/// Divide the workload and return the start key of each chunks. Guaranteed to return a
 		/// non-empty list.
 		fn gen_start_keys(prefix: &StorageKey) -> Vec<StorageKey> {
@@ -491,7 +494,7 @@ where
 		block: B::Hash,
 		start_key: Option<&StorageKey>,
 		end_key: Option<&StorageKey>,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		let mut last_key: Option<&StorageKey> = start_key;
 		let mut keys: Vec<StorageKey> = vec![];
 
@@ -518,11 +521,11 @@ where
 			// scraping out of range or no more matches,
 			// we are done either way
 			if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize {
-				log::debug!(target: LOG_TARGET, "last page received: {}", page_len);
+				debug!(target: LOG_TARGET, "last page received: {page_len}");
 				break
 			}
 
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
 				"new total = {}, full page received: {}",
 				keys.len(),
@@ -589,11 +592,10 @@ where
 		let total_payloads = payloads.len();
 
 		while start_index < total_payloads {
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
-				"Remaining payloads: {} Batch request size: {}",
+				"Remaining payloads: {} Batch request size: {batch_size}",
 				total_payloads - start_index,
-				batch_size,
 			);
 
 			let end_index = usize::min(start_index + batch_size, total_payloads);
@@ -620,18 +622,16 @@ where
 
 					retries += 1;
 					let failure_log = format!(
-						"Batch request failed ({}/{} retries). Error: {}",
-						retries,
-						Self::MAX_RETRIES,
-						e
+						"Batch request failed ({retries}/{} retries). Error: {e}",
+						Self::MAX_RETRIES
 					);
 					// after 2 subsequent failures something very wrong is happening. log a warning
 					// and reset the batch size down to 1.
 					if retries >= 2 {
-						log::warn!("{}", failure_log);
+						warn!("{failure_log}");
 						batch_size = 1;
 					} else {
-						log::debug!("{}", failure_log);
+						debug!("{failure_log}");
 						// Decrease batch size by DECREASE_FACTOR
 						batch_size =
 							(batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize;
@@ -655,13 +655,11 @@ where
 				)
 			};
 
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
-				"Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}",
-				request_duration,
+				"Request duration: {request_duration:?} Target duration: {:?} Last batch size: {} Next batch size: {batch_size}",
 				Self::REQUEST_DURATION_TARGET,
 				end_index - start_index,
-				batch_size
 			);
 
 			let batch_response_len = batch_response.len();
@@ -689,21 +687,24 @@ where
 		prefix: StorageKey,
 		at: B::Hash,
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<Vec<KeyValue>, &'static str> {
-		let start = Instant::now();
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into());
-		// TODO We could start downloading when having collected the first batch of keys
-		// https://github.com/paritytech/polkadot-sdk/issues/2494
-		let keys = self
-			.rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS)
-			.await?
-			.into_iter()
-			.collect::<Vec<_>>();
-		sp.stop_with_message(format!(
-			"✅ Found {} keys ({:.2}s)",
-			keys.len(),
-			start.elapsed().as_secs_f32()
-		));
+	) -> Result<Vec<KeyValue>> {
+		let keys = logging::with_elapsed_async(
+			|| async {
+				// TODO: We could start downloading once the first batch of keys has been collected.
+				// https://github.com/paritytech/polkadot-sdk/issues/2494
+				let keys = self
+					.rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS)
+					.await?
+					.into_iter()
+					.collect::<Vec<_>>();
+
+				Ok(keys)
+			},
+			"Scraping keys...",
+			|keys| format!("Found {} keys", keys.len()),
+		)
+		.await?;
+
 		if keys.is_empty() {
 			return Ok(Default::default())
 		}
@@ -735,7 +736,7 @@ where
 		let storage_data = match storage_data_result {
 			Ok(storage_data) => storage_data.into_iter().flatten().collect::<Vec<_>>(),
 			Err(e) => {
-				log::error!(target: LOG_TARGET, "Error while getting storage data: {}", e);
+				error!(target: LOG_TARGET, "Error while getting storage data: {e}");
 				return Err("Error while getting storage data")
 			},
 		};
@@ -751,27 +752,31 @@ where
 			.map(|(key, maybe_value)| match maybe_value {
 				Some(data) => (key.clone(), data),
 				None => {
-					log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key);
+					warn!(target: LOG_TARGET, "key {key:?} had no corresponding value.");
 					let data = StorageData(vec![]);
 					(key.clone(), data)
 				},
 			})
 			.collect::<Vec<_>>();
 
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into());
-		let start = Instant::now();
-		pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| {
-			// Don't insert the child keys here, they need to be inserted separately with all their
-			// data in the load_child_remote function.
-			match is_default_child_storage_key(&k.0) {
-				true => None,
-				false => Some((k.0, v.0)),
-			}
-		}));
-		sp.stop_with_message(format!(
-			"✅ Inserted keys into DB ({:.2}s)",
-			start.elapsed().as_secs_f32()
-		));
+		logging::with_elapsed(
+			|| {
+				pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| {
+					// Don't insert the child keys here, they need to be inserted separately with
+					// all their data in the load_child_remote function.
+					match is_default_child_storage_key(&k.0) {
+						true => None,
+						false => Some((k.0, v.0)),
+					}
+				}));
+
+				Ok(())
+			},
+			"Inserting keys into DB...",
+			|_| "Inserted keys into DB".into(),
+		)
+		.expect("must succeed; qed");
+
 		Ok(key_values)
 	}
 
@@ -781,7 +786,7 @@ where
 		prefixed_top_key: &StorageKey,
 		child_keys: Vec<StorageKey>,
 		at: B::Hash,
-	) -> Result<Vec<KeyValue>, &'static str> {
+	) -> Result<Vec<KeyValue>> {
 		let child_keys_len = child_keys.len();
 
 		let payloads = child_keys
@@ -803,7 +808,7 @@ where
 			match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await {
 				Ok(storage_data) => storage_data,
 				Err(e) => {
-					log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e);
+					error!(target: LOG_TARGET, "batch processing failed: {e:?}");
 					return Err("batch processing failed")
 				},
 			};
@@ -816,7 +821,7 @@ where
 			.map(|(key, maybe_value)| match maybe_value {
 				Some(v) => (key.clone(), v),
 				None => {
-					log::warn!(target: LOG_TARGET, "key {:?} had no corresponding value.", &key);
+					warn!(target: LOG_TARGET, "key {key:?} had no corresponding value.");
 					(key.clone(), StorageData(vec![]))
 				},
 			})
@@ -828,7 +833,7 @@ where
 		prefixed_top_key: &StorageKey,
 		child_prefix: StorageKey,
 		at: B::Hash,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		let retry_strategy =
 			FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
 		let mut all_child_keys = Vec::new();
@@ -850,7 +855,7 @@ where
 			let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure)
 				.await
 				.map_err(|e| {
-					error!(target: LOG_TARGET, "Error = {:?}", e);
+					error!(target: LOG_TARGET, "Error = {e:?}");
 					"rpc child_get_keys failed."
 				})?;
 
@@ -896,7 +901,7 @@ where
 		&self,
 		top_kv: &[KeyValue],
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<ChildKeyValues, &'static str> {
+	) -> Result<ChildKeyValues> {
 		let child_roots = top_kv
 			.iter()
 			.filter(|(k, _)| is_default_child_storage_key(k.as_ref()))
@@ -904,7 +909,7 @@ where
 			.collect::<Vec<_>>();
 
 		if child_roots.is_empty() {
-			info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape",);
+			info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape");
 			return Ok(Default::default())
 		}
 
@@ -930,7 +935,7 @@ where
 			let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) {
 				Some((ChildType::ParentKeyId, storage_key)) => storage_key,
 				None => {
-					log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key);
+					error!(target: LOG_TARGET, "invalid key: {prefixed_top_key:?}");
 					return Err("Invalid child key")
 				},
 			};
@@ -954,13 +959,13 @@ where
 	async fn load_top_remote(
 		&self,
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<TopKeyValues, &'static str> {
+	) -> Result<TopKeyValues> {
 		let config = self.as_online();
 		let at = self
 			.as_online()
 			.at
 			.expect("online config must be initialized by this point; qed.");
-		log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at);
+		info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {at:?}");
 
 		let mut keys_and_values = Vec::new();
 		for prefix in &config.hashed_prefixes {
@@ -968,7 +973,7 @@ where
 			let additional_key_values =
 				self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?;
 			let elapsed = now.elapsed();
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"adding data for hashed prefix: {:?}, took {:.2}s",
 				HexDisplay::from(prefix),
@@ -979,7 +984,7 @@ where
 
 		for key in &config.hashed_keys {
 			let key = StorageKey(key.to_vec());
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"adding data for hashed key: {:?}",
 				HexDisplay::from(&key)
@@ -990,7 +995,7 @@ where
 					keys_and_values.push((key, value));
 				},
 				None => {
-					log::warn!(
+					warn!(
 						target: LOG_TARGET,
 						"no data found for hashed key: {:?}",
 						HexDisplay::from(&key)
@@ -1005,17 +1010,16 @@ where
 	/// The entry point of execution, if `mode` is online.
 	///
 	/// initializes the remote client in `transport`, and sets the `at` field, if not specified.
-	async fn init_remote_client(&mut self) -> Result<(), &'static str> {
+	async fn init_remote_client(&mut self) -> Result<()> {
 		// First, initialize the http client.
 		self.as_online_mut().transport.init().await?;
 
 		// Then, if `at` is not set, set it.
 		if self.as_online().at.is_none() {
 			let at = self.rpc_get_head().await?;
-			log::info!(
+			info!(
 				target: LOG_TARGET,
-				"since no at is provided, setting it to latest finalized head, {:?}",
-				at
+				"since no at is provided, setting it to latest finalized head, {at:?}",
 			);
 			self.as_online_mut().at = Some(at);
 		}
@@ -1040,7 +1044,7 @@ where
 			.filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX)
 			.count() == 0
 		{
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"since no prefix is filtered, the data for all pallets will be downloaded"
 			);
@@ -1050,7 +1054,7 @@ where
 		Ok(())
 	}
 
-	async fn load_header(&self) -> Result<B::Header, &'static str> {
+	async fn load_header(&self) -> Result<B::Header> {
 		let retry_strategy =
 			FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
 		let get_header_closure = || {
@@ -1069,14 +1073,12 @@ where
 	/// `load_child_remote`.
 	///
 	/// Must be called after `init_remote_client`.
-	async fn load_remote_and_maybe_save(
-		&mut self,
-	) -> Result<TestExternalities<HashingFor<B>>, &'static str> {
+	async fn load_remote_and_maybe_save(&mut self) -> Result<TestExternalities<HashingFor<B>>> {
 		let state_version =
 			StateApi::<B::Hash>::runtime_version(self.as_online().rpc_client(), None)
 				.await
 				.map_err(|e| {
-					error!(target: LOG_TARGET, "Error = {:?}", e);
+					error!(target: LOG_TARGET, "Error = {e:?}");
 					"rpc runtime_version failed."
 				})
 				.map(|v| v.state_version())?;
@@ -1100,11 +1102,10 @@ where
 				self.load_header().await?,
 			);
 			let encoded = snapshot.encode();
-			log::info!(
+			info!(
 				target: LOG_TARGET,
-				"writing snapshot of {} bytes to {:?}",
+				"writing snapshot of {} bytes to {path:?}",
 				encoded.len(),
-				path
 			);
 			std::fs::write(path, encoded).map_err(|_| "fs::write failed")?;
 
@@ -1119,33 +1120,35 @@ where
 		Ok(pending_ext)
 	}
 
-	async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>, &'static str> {
+	async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>> {
 		self.init_remote_client().await?;
 		let inner_ext = self.load_remote_and_maybe_save().await?;
 		Ok(RemoteExternalities { header: self.load_header().await?, inner_ext })
 	}
 
-	fn do_load_offline(
-		&mut self,
-		config: OfflineConfig,
-	) -> Result<RemoteExternalities<B>, &'static str> {
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into());
-		let start = Instant::now();
-		info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path);
-		let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } =
-			Snapshot::<B>::load(&config.state_snapshot.path)?;
-
-		let inner_ext = TestExternalities::from_raw_snapshot(
-			raw_storage,
-			storage_root,
-			self.overwrite_state_version.unwrap_or(state_version),
-		);
-		sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32()));
+	fn do_load_offline(&mut self, config: OfflineConfig) -> Result<RemoteExternalities<B>> {
+		let (header, inner_ext) = logging::with_elapsed(
+			|| {
+				info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path);
+
+				let Snapshot { header, state_version, raw_storage, storage_root, .. } =
+					Snapshot::<B>::load(&config.state_snapshot.path)?;
+				let inner_ext = TestExternalities::from_raw_snapshot(
+					raw_storage,
+					storage_root,
+					self.overwrite_state_version.unwrap_or(state_version),
+				);
+
+				Ok((header, inner_ext))
+			},
+			"Loading snapshot...",
+			|_| "Loaded snapshot".into(),
+		)?;
 
 		Ok(RemoteExternalities { inner_ext, header })
 	}
 
-	pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>, &'static str> {
+	pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>> {
 		let mut ext = match self.mode.clone() {
 			Mode::Offline(config) => self.do_load_offline(config)?,
 			Mode::Online(_) => self.do_load_remote().await?,
@@ -1159,7 +1162,7 @@ where
 
 		// inject manual key values.
 		if !self.hashed_key_values.is_empty() {
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"extending externalities with {} manually injected key-values",
 				self.hashed_key_values.len()
@@ -1169,7 +1172,7 @@ where
 
 		// exclude manual key values.
 		if !self.hashed_blacklist.is_empty() {
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"excluding externalities from {} keys",
 				self.hashed_blacklist.len()
@@ -1221,7 +1224,7 @@ where
 		self
 	}
 
-	pub async fn build(self) -> Result<RemoteExternalities<B>, &'static str> {
+	pub async fn build(self) -> Result<RemoteExternalities<B>> {
 		let mut ext = self.pre_build().await?;
 		ext.commit_all().unwrap();
 
diff --git a/substrate/utils/frame/remote-externalities/src/logging.rs b/substrate/utils/frame/remote-externalities/src/logging.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7ab901c004de2b65d42b343d2f73297d215c4720
--- /dev/null
+++ b/substrate/utils/frame/remote-externalities/src/logging.rs
@@ -0,0 +1,86 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{
+	future::Future,
+	io::{self, IsTerminal},
+	time::Instant,
+};
+
+use spinners::{Spinner, Spinners};
+
+use super::Result;
+
+// A simple helper to time an operation with a nice spinner, start message, and end message.
+//
+// The spinner is only displayed when stdout is a terminal.
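+//
+// Illustrative usage (a sketch; `load_keys` and the messages below are made up):
+//
+// 	let keys = with_elapsed(
+// 		|| load_keys(),
+// 		"Loading keys...",
+// 		|keys| format!("Loaded {} keys", keys.len()),
+// 	)?;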
+pub(super) fn with_elapsed<F, R, EndMsg>(f: F, start_msg: &str, end_msg: EndMsg) -> Result<R>
+where
+	F: FnOnce() -> Result<R>,
+	EndMsg: FnOnce(&R) -> String,
+{
+	let timer = Instant::now();
+	let mut maybe_sp = start(start_msg);
+
+	Ok(end(f()?, timer, maybe_sp.as_mut(), end_msg))
+}
+
+// A simple helper to time an async operation with a nice spinner, start message, and end message.
+//
+// The spinner is only displayed when stdout is a terminal.
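+//
+// Illustrative usage (a sketch; `fetch_keys` is made up):
+//
+// 	let keys = with_elapsed_async(
+// 		|| async { fetch_keys().await },
+// 		"Fetching keys...",
+// 		|keys| format!("Fetched {} keys", keys.len()),
+// 	)
+// 	.await?;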
+pub(super) async fn with_elapsed_async<F, Fut, R, EndMsg>(
+	f: F,
+	start_msg: &str,
+	end_msg: EndMsg,
+) -> Result<R>
+where
+	F: FnOnce() -> Fut,
+	Fut: Future<Output = Result<R>>,
+	EndMsg: FnOnce(&R) -> String,
+{
+	let timer = Instant::now();
+	let mut maybe_sp = start(start_msg);
+
+	Ok(end(f().await?, timer, maybe_sp.as_mut(), end_msg))
+}
+
+fn start(start_msg: &str) -> Option<Spinner> {
+	let msg = format!("⏳ {start_msg}");
+
+	if io::stdout().is_terminal() {
+		Some(Spinner::new(Spinners::Dots, msg))
+	} else {
+		println!("{msg}");
+
+		None
+	}
+}
+
+fn end<T, EndMsg>(val: T, timer: Instant, maybe_sp: Option<&mut Spinner>, end_msg: EndMsg) -> T
+where
+	EndMsg: FnOnce(&T) -> String,
+{
+	let msg = format!("✅ {} in {:.2}s", end_msg(&val), timer.elapsed().as_secs_f32());
+
+	if let Some(sp) = maybe_sp {
+		sp.stop_with_message(msg);
+	} else {
+		println!("{msg}");
+	}
+
+	val
+}
diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs
index 5edac2e6650f528c5f138c800fa3c9b350be52c5..ae39cb4a7dd319f952cbe7a2e54eb3cdeffd54f1 100644
--- a/substrate/utils/prometheus/src/lib.rs
+++ b/substrate/utils/prometheus/src/lib.rs
@@ -87,7 +87,7 @@ async fn request_metrics(
 /// to serve metrics.
 pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error> {
 	let listener = tokio::net::TcpListener::bind(&prometheus_addr).await.map_err(|e| {
-		log::error!(target: "prometheus", "Error binding to '{:#?}': {:#?}", prometheus_addr, e);
+		log::error!(target: "prometheus", "Error binding to '{prometheus_addr:?}': {e:?}");
 		Error::PortInUse(prometheus_addr)
 	})?;
 
diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs
index 72eded5bfd135c9a779f3e85b9328bbb69b17457..972c7500f3993f5a362e422e300262025146ce1c 100644
--- a/templates/minimal/runtime/src/lib.rs
+++ b/templates/minimal/runtime/src/lib.rs
@@ -118,6 +118,10 @@ type TxExtension = (
 	// Ensures that the sender has enough funds to pay for the transaction
 	// and deducts the fee from the sender's account.
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	// Reclaim the unused weight from the block using post dispatch information.
+	// It must be last in the pipeline in order to catch the refund in previous transaction
+	// extensions
+	frame_system::WeightReclaim<Runtime>,
 );
 
 // Composes the runtime by adding all the used pallets and deriving necessary types.
diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs
index 9295492bc20bc919fa7f728260a25e9f0de31005..4d6dd5642a1ed3314e153412e1baf24cd0f23781 100644
--- a/templates/parachain/pallets/template/src/weights.rs
+++ b/templates/parachain/pallets/template/src/weights.rs
@@ -39,6 +39,12 @@ pub trait WeightInfo {
 }
 
 /// Weights for pallet_template using the Substrate node and recommended hardware.
+#[cfg_attr(
+	not(feature = "std"),
+	deprecated(
+		note = "SubstrateWeight is auto-generated and should not be used in production. Replace it with runtime benchmarked weights."
+	)
+)]
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Storage: Template Something (r:0 w:1)
diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml
index 9a0548106ed7b93d5f750396b98f3acdc7d69662..83d7bf4c9b72d60e37977411f0b5f8c6d5363c16 100644
--- a/templates/parachain/runtime/Cargo.toml
+++ b/templates/parachain/runtime/Cargo.toml
@@ -48,11 +48,11 @@ polkadot-sdk = { workspace = true, default-features = false, features = [
 
 	"cumulus-pallet-aura-ext",
 	"cumulus-pallet-session-benchmarking",
+	"cumulus-pallet-weight-reclaim",
 	"cumulus-pallet-xcm",
 	"cumulus-pallet-xcmp-queue",
 	"cumulus-primitives-aura",
 	"cumulus-primitives-core",
-	"cumulus-primitives-storage-weight-reclaim",
 	"cumulus-primitives-utility",
 	"pallet-collator-selection",
 	"parachains-common",
diff --git a/templates/parachain/runtime/src/benchmarks.rs b/templates/parachain/runtime/src/benchmarks.rs
index aae50e7258c0616248a5b11d3efab88e4258dba9..ca9d423bf856b12f8226e3f9a81ade4f059939f4 100644
--- a/templates/parachain/runtime/src/benchmarks.rs
+++ b/templates/parachain/runtime/src/benchmarks.rs
@@ -33,4 +33,5 @@ polkadot_sdk::frame_benchmarking::define_benchmarks!(
 	[pallet_collator_selection, CollatorSelection]
 	[cumulus_pallet_parachain_system, ParachainSystem]
 	[cumulus_pallet_xcmp_queue, XcmpQueue]
+	[cumulus_pallet_weight_reclaim, WeightReclaim]
 );
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
index ba4c71c7f218705c4e5035d1fcac67fce10e2485..1e9155f59a57a8c9f5f34ef946fd3b39113e7a74 100644
--- a/templates/parachain/runtime/src/configs/mod.rs
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -129,6 +129,11 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+/// Configure the pallet weight reclaim transaction extension.
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
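+	// `()` presumably falls back to the pallet's default, unbenchmarked weights; swap in weights
+	// generated from the `cumulus_pallet_weight_reclaim` benchmarks for production use.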
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs
index 9669237af785af680e1c912e5775d613f023d2e1..0be27ecce73945a6fbda409f7d19994493d9be0f 100644
--- a/templates/parachain/runtime/src/lib.rs
+++ b/templates/parachain/runtime/src/lib.rs
@@ -75,18 +75,20 @@ pub type BlockId = generic::BlockId<Block>;
 
 /// The extension to the basic transaction logic.
 #[docify::export(template_signed_extra)]
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
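+// Note: `cumulus_pallet_weight_reclaim::StorageWeightReclaim` wraps the inner extensions so the
+// proof size used by the whole pipeline can be measured and any over-estimate reclaimed; it
+// replaces the former `cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim` element.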
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -272,6 +274,8 @@ mod runtime {
 	pub type Timestamp = pallet_timestamp;
 	#[runtime::pallet_index(3)]
 	pub type ParachainInfo = parachain_info;
+	#[runtime::pallet_index(4)]
+	pub type WeightReclaim = cumulus_pallet_weight_reclaim;
 
 	// Monetary stuff.
 	#[runtime::pallet_index(10)]
diff --git a/templates/solochain/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs
index 0d60230cd19c0347ca54bd0feae8a5ca7f2da66c..467cad4c0aaa50c3445e5370a2fbc12ba4482e97 100644
--- a/templates/solochain/node/src/benchmarking.rs
+++ b/templates/solochain/node/src/benchmarking.rs
@@ -122,6 +122,7 @@ pub fn create_benchmark_extrinsic(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	);
 
 	let raw_payload = runtime::SignedPayload::from_raw(
@@ -137,6 +138,7 @@ pub fn create_benchmark_extrinsic(
 			(),
 			(),
 			None,
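+			// Implicit data for the new `WeightReclaim` extension (it has none).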
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| sender.sign(e));
diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs
index ae0ea16ae42ef6122d1472677f5a23fe769e465d..6a2149ec8b637c2b253a9327abd81260259cf150 100644
--- a/templates/solochain/runtime/src/lib.rs
+++ b/templates/solochain/runtime/src/lib.rs
@@ -157,6 +157,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
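+	// Reclaims unused weight; it must be last so it can catch refunds from the previous extensions.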
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index cf7a97c40a4eeceaa23366274ff73f931eb1630c..fc0b2d5a140ed18419a3cece320f03f6fab8b711 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -29,6 +29,7 @@ std = [
 	"cumulus-pallet-parachain-system?/std",
 	"cumulus-pallet-session-benchmarking?/std",
 	"cumulus-pallet-solo-to-para?/std",
+	"cumulus-pallet-weight-reclaim?/std",
 	"cumulus-pallet-xcm?/std",
 	"cumulus-pallet-xcmp-queue?/std",
 	"cumulus-ping?/std",
@@ -240,6 +241,7 @@ runtime-benchmarks = [
 	"cumulus-pallet-dmp-queue?/runtime-benchmarks",
 	"cumulus-pallet-parachain-system?/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking?/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim?/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue?/runtime-benchmarks",
 	"cumulus-primitives-core?/runtime-benchmarks",
 	"cumulus-primitives-utility?/runtime-benchmarks",
@@ -292,7 +294,6 @@ runtime-benchmarks = [
 	"pallet-membership?/runtime-benchmarks",
 	"pallet-message-queue?/runtime-benchmarks",
 	"pallet-migrations?/runtime-benchmarks",
-	"pallet-mixnet?/runtime-benchmarks",
 	"pallet-mmr?/runtime-benchmarks",
 	"pallet-multisig?/runtime-benchmarks",
 	"pallet-nft-fractionalization?/runtime-benchmarks",
@@ -372,6 +373,7 @@ try-runtime = [
 	"cumulus-pallet-dmp-queue?/try-runtime",
 	"cumulus-pallet-parachain-system?/try-runtime",
 	"cumulus-pallet-solo-to-para?/try-runtime",
+	"cumulus-pallet-weight-reclaim?/try-runtime",
 	"cumulus-pallet-xcm?/try-runtime",
 	"cumulus-pallet-xcmp-queue?/try-runtime",
 	"cumulus-ping?/try-runtime",
@@ -544,7 +546,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", 
"pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", 
"pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -726,6 +728,11 @@ default-features = false
 optional = true
 path = "../cumulus/pallets/solo-to-para"
 
+[dependencies.cumulus-pallet-weight-reclaim]
+default-features = false
+optional = true
+path = "../cumulus/pallets/weight-reclaim"
+
 [dependencies.cumulus-pallet-xcm]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 9142f318442123e430e7dbd3ef7a34b415d0529d..a132f16a2c33f617ee1a916deb585534d3e6f99c 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -141,6 +141,10 @@ pub use cumulus_pallet_session_benchmarking;
 #[cfg(feature = "cumulus-pallet-solo-to-para")]
 pub use cumulus_pallet_solo_to_para;
 
+/// Pallet and transaction extensions for accurate proof size reclaim.
+#[cfg(feature = "cumulus-pallet-weight-reclaim")]
+pub use cumulus_pallet_weight_reclaim;
+
 /// Pallet for stuff specific to parachains' usage of XCM.
 #[cfg(feature = "cumulus-pallet-xcm")]
 pub use cumulus_pallet_xcm;