diff --git a/.config/lychee.toml b/.config/lychee.toml
index 58f8d068d9d13d270c19445fa6983c605b0febb4..55e368dacb50815e750b537ddb4e254590aeca5b 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -52,10 +52,15 @@ exclude = [
 	# Behind a captcha (code 403):
 	"https://chainlist.org/chain/*",
 	"https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/",
+	"https://polymesh.network",
 	"https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/",
 
 	# 403 rate limited:
 	"https://etherscan.io/block/11090290",
 	"https://subscan.io/",
 	"https://substrate.stackexchange.com/.*",
+
+	# Exclude strings which contain templates like {} and {:?}
+	"%7B%7D",
+	"%7B:\\?%7D",
 ]
diff --git a/.github/workflows/benchmarks-subsystem.yml b/.github/workflows/benchmarks-subsystem.yml
index 210714d847ff032dee89a4dfbd36e9a34b7f5940..82aff7e694f9b241f7d27c867559ae739ee7f1b8 100644
--- a/.github/workflows/benchmarks-subsystem.yml
+++ b/.github/workflows/benchmarks-subsystem.yml
@@ -16,9 +16,11 @@ permissions:
   contents: read
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
-
   build:
     timeout-minutes: 80
     needs: [preflight]
diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml
index e1ef29f305d0f1e97866d84a83a960a04734b76c..fc29bfb7b34376612868c574901d8a86719ccc9a 100644
--- a/.github/workflows/build-misc.yml
+++ b/.github/workflows/build-misc.yml
@@ -16,7 +16,10 @@ permissions:
   contents: read
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   build-runtimes-polkavm:
diff --git a/.github/workflows/build-publish-eth-rpc.yml b/.github/workflows/build-publish-eth-rpc.yml
index 621ac3cd44c3c49d474f420979f2d318720c18ca..e576f3b7938043f48c8f49843e7baa6032d35b27 100644
--- a/.github/workflows/build-publish-eth-rpc.yml
+++ b/.github/workflows/build-publish-eth-rpc.yml
@@ -5,7 +5,7 @@ on:
     branches:
       - master
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review, labeled]
+    types: [opened, synchronize, reopened, ready_for_review]
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -15,10 +15,13 @@ env:
   ETH_RPC_IMAGE_NAME: "docker.io/paritypr/eth-rpc"
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   set-variables:
     # This workaround sets the container image for each job using 'set-variables' job output.
     # env variables don't work for PR from forks, so we need to use outputs.
     runs-on: ubuntu-latest
+    needs: isdraft
     outputs:
       VERSION: ${{ steps.version.outputs.VERSION }}
     steps:
@@ -77,4 +80,3 @@ jobs:
           push: true
           tags: |
             ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }}
-
diff --git a/.github/workflows/build-publish-images.yml b/.github/workflows/build-publish-images.yml
index 0bf2bded0b0dfeaeb784c985d84ad0d4bef709c5..0ce33e2864a41fcf2d8a0a49b59a66932f0eba95 100644
--- a/.github/workflows/build-publish-images.yml
+++ b/.github/workflows/build-publish-images.yml
@@ -6,7 +6,7 @@ on:
     branches:
       - master
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review, labeled]
+    types: [opened, synchronize, reopened, ready_for_review]
   merge_group:
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -18,7 +18,10 @@ jobs:
   #
   #
   #
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   ### Build ########################
diff --git a/.github/workflows/check-cargo-check-runtimes.yml b/.github/workflows/check-cargo-check-runtimes.yml
index 376c34d1f25fd965f69f3c2874b38c39e6a76573..c4ad0f3d139de40908415c9d6c1202d22f38d339 100644
--- a/.github/workflows/check-cargo-check-runtimes.yml
+++ b/.github/workflows/check-cargo-check-runtimes.yml
@@ -13,7 +13,10 @@ on:
 # Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   check-runtime-assets:
diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml
index bc0ff82b677414d82b9e5724f3fa0b46612436dc..f4227cf5a6a662e1d1d36d487ca60c935c93b5c6 100644
--- a/.github/workflows/check-frame-omni-bencher.yml
+++ b/.github/workflows/check-frame-omni-bencher.yml
@@ -5,7 +5,7 @@ on:
     branches:
       - master
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review, labeled]
+    types: [opened, synchronize, reopened, ready_for_review]
   merge_group:
 
 concurrency:
@@ -16,7 +16,10 @@ env:
   ARTIFACTS_NAME: frame-omni-bencher-artifacts
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   quick-benchmarks-omni:
diff --git a/.github/workflows/check-getting-started.yml b/.github/workflows/check-getting-started.yml
index 0661fa144348de687c0ddf1266be593664adc5e4..4178016bc25f8e0bf21ba89b3879d428a11ff8ac 100644
--- a/.github/workflows/check-getting-started.yml
+++ b/.github/workflows/check-getting-started.yml
@@ -26,16 +26,19 @@ on:
     paths:
       - ".github/workflows/check-getting-started.yml"
       - "scripts/getting-started.sh"
+    types: [opened, synchronize, reopened, ready_for_review]
   schedule:
     - cron: "0 5 * * *"
-  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   check-getting-started:
+    needs: isdraft
     strategy:
       fail-fast: true
       matrix:
@@ -179,6 +182,7 @@ jobs:
         timeout-minutes: 5
 
   check-getting-started-macos:
+    needs: isdraft
     strategy:
       fail-fast: true
       matrix:
diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml
index 9492d06c7fcf93dac1d4741bb2b4580d9e187dee..70f374142dc011202cbe80786f62869a3a419c1f 100644
--- a/.github/workflows/check-licenses.yml
+++ b/.github/workflows/check-licenses.yml
@@ -2,6 +2,7 @@ name: Check licenses
 
 on:
   pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
   merge_group:
 
 concurrency:
@@ -12,8 +13,11 @@ permissions:
   packages: read
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   check-licenses:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 10
     env:
       LICENSES: "'Apache-2.0' 'GPL-3.0-only' 'GPL-3.0-or-later WITH Classpath-exception-2.0' 'MIT-0' 'Unlicense'"
diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml
index 81ce23492c721b6d87fb0ef712a8c71d49a18197..6d5a79ff34a3f0da9b12fc4da0d5e29b02971745 100644
--- a/.github/workflows/check-links.yml
+++ b/.github/workflows/check-links.yml
@@ -18,8 +18,11 @@ permissions:
   packages: read
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   link-checker:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 10
     steps:
       - name: Restore lychee cache
diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml
index 8af1dd8cef708ec2f374ef9dca7fdeec150254ad..2b68e0112685078b2f189f73d7e85672cc1941a8 100644
--- a/.github/workflows/check-prdoc.yml
+++ b/.github/workflows/check-prdoc.yml
@@ -22,49 +22,33 @@ jobs:
   check-prdoc:
     runs-on: ubuntu-latest
     timeout-minutes: 10
-    if: github.event.pull_request.number != ''
     steps:
       - name: Checkout repo
         uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc #v4.1.7
-      # we cannot show the version in this step (ie before checking out the repo)
-      # due to https://github.com/paritytech/prdoc/issues/15
-      - name: Check if PRdoc is required
-        id: get-labels
+      - name: Check prdoc format
         run: |
           echo "Pulling $IMAGE"
           $ENGINE pull $IMAGE
 
-          # Fetch the labels for the PR under test
-          echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}"
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-          echo "Labels: ${labels}"
-          echo "labels=${labels}" >> "$GITHUB_OUTPUT"
-
           echo "Checking PRdoc version"
           $ENGINE run --rm -v $PWD:/repo $IMAGE --version
 
-      - name: Early exit if PR is silent
-        if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }}
-        run: |
-          hits=$(find prdoc -name "pr_$GITHUB_PR*.prdoc" | wc -l)
-          if (( hits > 0 )); then
-            echo "PR detected as silent, but a PRDoc was found, checking it as information"
-            $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} || echo "Ignoring failure"
-          else
-            echo "PR detected as silent, no PRDoc found, exiting..."
-          fi
-          echo "If you want to add a PRDoc, please refer to $PRDOC_DOC"
-          exit 0
+          echo "Check prdoc format"
+          echo "For PRDoc format, please refer to $PRDOC_DOC"
+          $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check
 
-      - name: PRdoc check for PR#${{ github.event.pull_request.number }}
-        if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }}
+      - name: Check if PRdoc is required
+        if: github.event.pull_request.number != ''
+        id: get-labels
         run: |
-          echo "Checking for PR#${GITHUB_PR}"
-          echo "You can find more information about PRDoc at $PRDOC_DOC"
-          $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR}
+          # Fetch the labels for the PR under test
+          echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}"
+          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
+          echo "Labels: ${labels}"
+          echo "labels=${labels}" >> "$GITHUB_OUTPUT"
 
       - name: Validate prdoc for PR#${{ github.event.pull_request.number }}
-        if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }}
+        if: ${{ github.event.pull_request.number != '' && !contains(steps.get-labels.outputs.labels, 'R0') }}
         run: |
           echo "Validating PR#${GITHUB_PR}"
           python3 --version
diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml
index e935f1cb44981e07f06cfeb74811fec29e9f41a3..0596bab3ae82c16c3ddda95d021e1d5d9a6517e1 100644
--- a/.github/workflows/check-runtime-migration.yml
+++ b/.github/workflows/check-runtime-migration.yml
@@ -19,7 +19,10 @@ concurrency:
 permissions: {}
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   # More info can be found here: https://github.com/paritytech/polkadot/pull/5865
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index a6e90cee53470810959941e4d86832c1b28bddd3..958a1590bd2488d796a22f3b32f49b3b028113e6 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -14,7 +14,10 @@ env:
   TOOLCHAIN: nightly-2024-11-19
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
   check-semver:
     runs-on: ubuntu-latest
diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
index 1a8813833deff8cccb412d832a38de470e0dc933..ff88fa6ff9f722f9b07f819088b631d7e608320f 100644
--- a/.github/workflows/checks-quick.yml
+++ b/.github/workflows/checks-quick.yml
@@ -15,7 +15,10 @@ concurrency:
 permissions: {}
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   fmt:
@@ -37,6 +40,7 @@ jobs:
           app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }}
   check-dependency-rules:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 20
     steps:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
@@ -84,6 +88,7 @@ jobs:
           echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues"
   check-workspace:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 20
     steps:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023)
@@ -102,6 +107,7 @@ jobs:
         run: python3 .github/scripts/deny-git-deps.py .
   check-markdown:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 20
     steps:
       - name: Checkout sources
@@ -154,6 +160,7 @@ jobs:
           fi
   check-fail-ci:
     runs-on: ubuntu-latest
+    needs: isdraft
     container:
       # there's no "rg" in ci-unified, and tools is a smaller image anyway
       image: "paritytech/tools:latest"
@@ -179,6 +186,7 @@ jobs:
           GIT_DEPTH: 1
   check-readme:
     runs-on: ubuntu-latest
+    needs: isdraft
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 02428711811f957e35de0cf27af8ba51f9c40547..d1d97534d9e1803a00f05da01dc6fa5a5ff801cb 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -15,7 +15,10 @@ concurrency:
 permissions: {}
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   cargo-clippy:
diff --git a/.github/workflows/cmd-tests.yml b/.github/workflows/cmd-tests.yml
index af73c6a5b2d32a19e7b64de4935f71988fbf8a1f..d2b46a98056a3a163d4095b4f025b45073af1b04 100644
--- a/.github/workflows/cmd-tests.yml
+++ b/.github/workflows/cmd-tests.yml
@@ -2,6 +2,7 @@ name: Command Bot Tests
 
 on:
   pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
 
 permissions:
   contents: read
@@ -11,8 +12,11 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   test-cmd-bot:
     runs-on: ubuntu-latest
+    needs: [isdraft]
     steps:
       - uses: actions/checkout@v4
       - run: python3 .github/scripts/cmd/test_cmd.py
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index b7c70c9e6d66f2dbef81c8f5aa2253b3b075820f..24fb284780be821bc4b947c9f62b6421ec205791 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -5,7 +5,7 @@ on:
     branches:
       - master
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review, labeled]
+    types: [opened, synchronize, reopened, ready_for_review]
   merge_group:
 
 concurrency:
@@ -13,7 +13,10 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   test-doc:
@@ -77,6 +80,7 @@ jobs:
 
   build-implementers-guide:
     runs-on: ubuntu-latest
+    needs: isdraft
     container:
       image: paritytech/mdbook-utils:e14aae4a-20221123
       options: --user root
diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml
index 01058ad74d0b71385a8096964ea6c779fc6f4869..954e4ad6e70dbfb36c2b8c9deb272c2246be3f66 100644
--- a/.github/workflows/gitspiegel-trigger.yml
+++ b/.github/workflows/gitspiegel-trigger.yml
@@ -28,8 +28,11 @@ on:
 permissions: {}
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   sync:
     runs-on: ubuntu-latest
+    needs: isdraft
     steps:
       - name: Do nothing
         run: echo "let's go"
diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml
index f20909106a82098d79ab8e47b03bf5b4076b9380..66d619a2426d394edace31c10e8abf4cc5ee6423 100644
--- a/.github/workflows/publish-check-compile.yml
+++ b/.github/workflows/publish-check-compile.yml
@@ -13,7 +13,10 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   check-publish-compile:
diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml
index c1b13243ba193c7c141782dc263c577c5f0ec159..ac204b97dbdfe318dfeb021e6f3eb23c3c757b16 100644
--- a/.github/workflows/publish-check-crates.yml
+++ b/.github/workflows/publish-check-crates.yml
@@ -13,8 +13,11 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   check-publish:
     runs-on: ubuntu-latest
+    needs: isdraft
     steps:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
 
diff --git a/.github/workflows/reusable-isdraft.yml b/.github/workflows/reusable-isdraft.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a0ee5c064fbe21e920d6f634f72ffa8dafd30c5c
--- /dev/null
+++ b/.github/workflows/reusable-isdraft.yml
@@ -0,0 +1,16 @@
+# This workflow is kept separate from reusable-preflight.yml so that CI can still be run on draft PRs when explicitly requested.
+
+name: Preflight isdraft
+
+on:
+  workflow_call:
+    # Map the workflow outputs to job outputs
+
+jobs:
+  isdraft:
+    runs-on: ubuntu-latest
+    if: github.event.pull_request.draft == false || contains(github.event.pull_request.labels.*.name, 'A5-run-CI')
+    steps:
+      - name: echo test
+        shell: bash
+        run: echo "PR is not draft, starting CI"
diff --git a/.github/workflows/tests-evm.yml b/.github/workflows/tests-evm.yml
index 2c98fa39d23a3021d4f0039d76f70d7729be9dce..3bf3b6f275ff0d0f583c7b030ff15f17aa0f72b3 100644
--- a/.github/workflows/tests-evm.yml
+++ b/.github/workflows/tests-evm.yml
@@ -12,7 +12,10 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   evm-test-suite:
diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml
index 3f8dc4fe1240a078aef7ca498bc1e8667f13dda2..029d7bc7411326f90bbe9f2cf1f941a9cfc55136 100644
--- a/.github/workflows/tests-linux-stable.yml
+++ b/.github/workflows/tests-linux-stable.yml
@@ -13,7 +13,10 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   test-linux-stable-int:
diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml
index decd88f2e84cfb5cf7519046f0018e6119c03315..77e39a99f9ed9f0241f95b1cc27ec523ecd2079b 100644
--- a/.github/workflows/tests-misc.yml
+++ b/.github/workflows/tests-misc.yml
@@ -14,7 +14,10 @@ concurrency:
 # Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   # more information about this job can be found here:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index ba0574b51e686fa9c3ffa4e3b35e986c7f373e55..a178ea02f0c5f142376db1be12f747a027ec0647 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -12,7 +12,10 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/reusable-preflight.yml
 
   # This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors.
diff --git a/.github/workflows/zombienet_cumulus.yml b/.github/workflows/zombienet_cumulus.yml
index c2231060c9aa1fe336060ace9768c50bd9d3b0fd..492c63b302a10ebf2840041e40dbfc8099db3a81 100644
--- a/.github/workflows/zombienet_cumulus.yml
+++ b/.github/workflows/zombienet_cumulus.yml
@@ -19,7 +19,10 @@ env:
 
 # only run if we have changes in [subtrate, cumulus, polkadot] directories or this workflow.
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/zombienet-reusable-preflight.yml
 
   zombienet-cumulus-0001-sync_blocks_from_tip_without_connected_collator:
@@ -158,7 +161,6 @@ jobs:
           path: |
             /tmp/zombie*/logs/*
 
-
   zombienet-cumulus-0005-migrate_solo_to_para:
     needs: [preflight]
     if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }}
diff --git a/.github/workflows/zombienet_substrate.yml b/.github/workflows/zombienet_substrate.yml
index 358438caa8afe7ddb65dff67cfc05c869c9742a1..7ddc2e613b238e83bdd2d701460bfdb22bc66039 100644
--- a/.github/workflows/zombienet_substrate.yml
+++ b/.github/workflows/zombienet_substrate.yml
@@ -24,7 +24,10 @@ env:
   DB_BLOCK_HEIGHT: 56687
 
 jobs:
+  isdraft:
+    uses: ./.github/workflows/reusable-isdraft.yml
   preflight:
+    needs: isdraft
     uses: ./.github/workflows/zombienet-reusable-preflight.yml
 
   zombienet-substrate-0000-block-building:
diff --git a/Cargo.lock b/Cargo.lock
index f1d3085f9a3f294e612f00fb344cf1ad95af7499..61d286549ff6cb0f7fefa22d8047f1795a5b9e51 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3045,6 +3045,7 @@ version = "1.0.0"
 dependencies = [
  "asset-hub-westend-runtime",
  "bridge-hub-westend-runtime",
+ "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "emulated-integration-tests-common",
  "frame-support 28.0.0",
@@ -7601,9 +7602,9 @@ dependencies = [
 
 [[package]]
 name = "frame-metadata"
-version = "18.0.0"
+version = "19.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8"
+checksum = "835a2e736d544b98dab966b4b9541f15af416288a86c3738fdd67bd9fbc4696e"
 dependencies = [
  "cfg-if",
  "parity-scale-codec",
@@ -7618,7 +7619,7 @@ dependencies = [
  "array-bytes",
  "const-hex",
  "docify",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "log",
@@ -7703,7 +7704,7 @@ dependencies = [
  "bitflags 1.3.2",
  "docify",
  "environmental",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "frame-support-procedural 23.0.0",
  "frame-system 28.0.0",
  "impl-trait-for-tuples",
@@ -7881,7 +7882,7 @@ version = "3.0.0"
 dependencies = [
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "frame-support 28.0.0",
  "frame-support-test-pallet",
  "frame-system 28.0.0",
@@ -8363,6 +8364,19 @@ dependencies = [
  "stable_deref_trait",
 ]
 
+[[package]]
+name = "git2"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fda788993cc341f69012feba8bf45c0ba4f3291fcc08e214b4d5a7332d88aff"
+dependencies = [
+ "bitflags 2.6.0",
+ "libc",
+ "libgit2-sys",
+ "log",
+ "url",
+]
+
 [[package]]
 name = "glob"
 version = "0.3.1"
@@ -8905,7 +8919,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.4.9",
+ "socket2 0.5.7",
  "tokio",
  "tower-service",
  "tracing",
@@ -10015,6 +10029,18 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "libgit2-sys"
+version = "0.18.0+1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1a117465e7e1597e8febea8bb0c410f1c7fb93b1e1cddf34363f8390367ffec"
+dependencies = [
+ "cc",
+ "libc",
+ "libz-sys",
+ "pkg-config",
+]
+
 [[package]]
 name = "libloading"
 version = "0.7.4"
@@ -10965,13 +10991,13 @@ dependencies = [
 
 [[package]]
 name = "merkleized-metadata"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38c592efaf1b3250df14c8f3c2d952233f0302bb81d3586db2f303666c1cd607"
+checksum = "e656ca4a152e892cbbf4b621c242b070136643b797709007d4861881d71ff4c9"
 dependencies = [
  "array-bytes",
  "blake3",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "parity-scale-codec",
  "scale-decode 0.13.1",
  "scale-info",
@@ -13871,7 +13897,7 @@ name = "pallet-example-view-functions"
 version = "1.0.0"
 dependencies = [
  "frame-benchmarking 28.0.0",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "log",
@@ -15157,6 +15183,7 @@ dependencies = [
  "env_logger 0.11.3",
  "ethabi",
  "futures",
+ "git2",
  "hex",
  "jsonrpsee",
  "log",
@@ -15731,6 +15758,42 @@ dependencies = [
  "sp-staking 36.0.0",
 ]
 
+[[package]]
+name = "pallet-staking-ah-client"
+version = "0.1.0"
+dependencies = [
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "pallet-authorship 28.0.0",
+ "pallet-session 28.0.0",
+ "pallet-staking 28.0.0",
+ "pallet-staking-rc-client",
+ "parity-scale-codec",
+ "polkadot-primitives 7.0.0",
+ "polkadot-runtime-parachains 7.0.0",
+ "scale-info",
+ "sp-core 28.0.0",
+ "sp-runtime 31.0.1",
+ "sp-staking 26.0.0",
+ "staging-xcm 7.0.0",
+]
+
+[[package]]
+name = "pallet-staking-rc-client"
+version = "0.1.0"
+dependencies = [
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core 28.0.0",
+ "sp-runtime 31.0.1",
+ "sp-staking 26.0.0",
+ "staging-xcm 7.0.0",
+]
+
 [[package]]
 name = "pallet-staking-reward-curve"
 version = "11.0.0"
@@ -16677,7 +16740,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9"
 dependencies = [
  "bitcoin_hashes 0.13.0",
  "rand",
- "rand_core 0.5.1",
+ "rand_core 0.6.4",
  "serde",
  "unicode-normalization",
 ]
@@ -19056,6 +19119,8 @@ dependencies = [
  "pallet-skip-feeless-payment 3.0.0",
  "pallet-society 28.0.0",
  "pallet-staking 28.0.0",
+ "pallet-staking-ah-client",
+ "pallet-staking-rc-client",
  "pallet-staking-reward-curve",
  "pallet-staking-reward-fn 19.0.0",
  "pallet-staking-runtime-api 14.0.0",
@@ -20939,7 +21004,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302"
 dependencies = [
  "bytes",
  "heck 0.5.0",
- "itertools 0.12.1",
+ "itertools 0.13.0",
  "log",
  "multimap",
  "once_cell",
@@ -20985,7 +21050,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac"
 dependencies = [
  "anyhow",
- "itertools 0.12.1",
+ "itertools 0.13.0",
  "proc-macro2 1.0.93",
  "quote 1.0.38",
  "syn 2.0.98",
@@ -27166,7 +27231,7 @@ dependencies = [
 name = "sp-metadata-ir"
 version = "0.6.0"
 dependencies = [
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "parity-scale-codec",
  "scale-info",
 ]
@@ -29271,7 +29336,7 @@ dependencies = [
  "cargo_metadata",
  "console",
  "filetime",
- "frame-metadata 18.0.0",
+ "frame-metadata 19.0.0",
  "jobserver",
  "merkleized-metadata",
  "parity-scale-codec",
diff --git a/Cargo.toml b/Cargo.toml
index aaa600ebdc93e191f81986828040412f4b5d2987..7d64d6fd1904c47631f1f9d8efe5526c67dd42d9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -423,6 +423,8 @@ members = [
 	"substrate/frame/session/benchmarking",
 	"substrate/frame/society",
 	"substrate/frame/staking",
+	"substrate/frame/staking/ah-client",
+	"substrate/frame/staking/rc-client",
 	"substrate/frame/staking/reward-curve",
 	"substrate/frame/staking/reward-fn",
 	"substrate/frame/staking/runtime-api",
@@ -791,7 +793,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr
 frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false }
 frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false }
 frame-executive = { path = "substrate/frame/executive", default-features = false }
-frame-metadata = { version = "18.0.0", default-features = false }
+frame-metadata = { version = "19.0.0", default-features = false }
 frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "substrate/frame/support", default-features = false }
 frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false }
@@ -866,7 +868,7 @@ macro_magic = { version = "0.5.1" }
 maplit = { version = "1.0.2" }
 memmap2 = { version = "0.9.3" }
 memory-db = { version = "0.32.0", default-features = false }
-merkleized-metadata = { version = "0.2.0" }
+merkleized-metadata = { version = "0.3.0" }
 merlin = { version = "3.0", default-features = false }
 messages-relay = { path = "bridges/relays/messages" }
 metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" }
@@ -1005,6 +1007,8 @@ pallet-session-benchmarking = { path = "substrate/frame/session/benchmarking", d
 pallet-skip-feeless-payment = { path = "substrate/frame/transaction-payment/skip-feeless-payment", default-features = false }
 pallet-society = { path = "substrate/frame/society", default-features = false }
 pallet-staking = { path = "substrate/frame/staking", default-features = false }
+pallet-staking-ah-client = { path = "substrate/frame/staking/ah-client", default-features = false }
+pallet-staking-rc-client = { path = "substrate/frame/staking/rc-client", default-features = false }
 pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false }
 pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false }
 pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false }
diff --git a/bridges/relays/client-substrate/src/client/rpc.rs b/bridges/relays/client-substrate/src/client/rpc.rs
index 9c7f769462e5693bc944ed6a6525439f00311ee7..7147bad3c28ef9ab47c4feb2943ed81d11cd8c10 100644
--- a/bridges/relays/client-substrate/src/client/rpc.rs
+++ b/bridges/relays/client-substrate/src/client/rpc.rs
@@ -194,21 +194,7 @@ impl<C: Chain> RpcClient<C> {
 		params: &ConnectionParams,
 	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<WsClient>)> {
 		let tokio = tokio::runtime::Runtime::new()?;
-		let uri = match params.uri {
-			Some(ref uri) => uri.clone(),
-			None => {
-				format!(
-					"{}://{}:{}{}",
-					if params.secure { "wss" } else { "ws" },
-					params.host,
-					params.port,
-					match params.path {
-						Some(ref path) => format!("/{}", path),
-						None => String::new(),
-					},
-				)
-			},
-		};
+		let uri = params.uri.clone();
 		log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri);
 
 		let client = tokio
diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs
index 12a1c48c09c7ad59d05c0e40a578bc71f7575b35..7eb1e4a6cde81c73a220f8af2f9ba0b130f237ba 100644
--- a/bridges/relays/client-substrate/src/lib.rs
+++ b/bridges/relays/client-substrate/src/lib.rs
@@ -57,17 +57,8 @@ pub use bp_runtime::{
 /// Substrate-over-websocket connection params.
 #[derive(Debug, Clone)]
 pub struct ConnectionParams {
-	/// Websocket endpoint URL. Overrides all other URL components (`host`, `port`, `path` and
-	/// `secure`).
-	pub uri: Option<String>,
-	/// Websocket server host name.
-	pub host: String,
-	/// Websocket server TCP port.
-	pub port: u16,
-	/// Websocket endpoint path at server.
-	pub path: Option<String>,
-	/// Use secure websocket connection.
-	pub secure: bool,
+	/// Websocket endpoint URL.
+	pub uri: String,
 	/// Defined chain runtime version
 	pub chain_runtime_version: ChainRuntimeVersion,
 }
@@ -75,11 +66,7 @@ pub struct ConnectionParams {
 impl Default for ConnectionParams {
 	fn default() -> Self {
 		ConnectionParams {
-			uri: None,
-			host: "localhost".into(),
-			port: 9944,
-			path: None,
-			secure: false,
+			uri: "ws://localhost:9944".into(),
 			chain_runtime_version: ChainRuntimeVersion::Auto,
 		}
 	}
diff --git a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs
index d985d35c9e802e694e74264ed3f611f14e8bc0d2..ae00b243c0c4328e70245d638dda1a196257be7a 100644
--- a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs
+++ b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs
@@ -98,21 +98,9 @@ macro_rules! declare_chain_connection_params_cli_schema {
 			#[doc = $chain " connection params."]
 			#[derive(StructOpt, Debug, PartialEq, Eq, Clone)]
 			pub struct [<$chain ConnectionParams>] {
-				#[doc = "WS endpoint of " $chain ": full URI. Overrides all other connection string components (host, port, path, secure)."]
+				#[doc = "WS endpoint of " $chain ": full URI."]
 				#[structopt(long)]
-				pub [<$chain_prefix _uri>]: Option<String>,
-				#[doc = "WS endpoint of " $chain ": host component."]
-				#[structopt(long, default_value = "127.0.0.1")]
-				pub [<$chain_prefix _host>]: String,
-				#[doc = "WS endpoint of " $chain ": port component."]
-				#[structopt(long, default_value = "9944")]
-				pub [<$chain_prefix _port>]: u16,
-				#[doc = "WS endpoint of " $chain ": path component."]
-				#[structopt(long)]
-				pub [<$chain_prefix _path>]: Option<String>,
-				#[doc = "Use secure websocket connection."]
-				#[structopt(long)]
-				pub [<$chain_prefix _secure>]: bool,
+				pub [<$chain_prefix _uri>]: String,
 				#[doc = "Custom runtime version"]
 				#[structopt(flatten)]
 				pub [<$chain_prefix _runtime_version>]: [<$chain RuntimeVersionParams>],
@@ -129,10 +117,6 @@ macro_rules! declare_chain_connection_params_cli_schema {
 						.into_runtime_version(Chain::RUNTIME_VERSION)?;
 					Ok(relay_substrate_client::new(relay_substrate_client::ConnectionParams {
 						uri: self.[<$chain_prefix _uri>],
-						host: self.[<$chain_prefix _host>],
-						port: self.[<$chain_prefix _port>],
-						path: self.[<$chain_prefix _path>],
-						secure: self.[<$chain_prefix _secure>],
 						chain_runtime_version,
 					})
 					.await
diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs
index bb6c689a76eb02bdbf6def9d3ac5d4742817ab06..078bf8ae735ae57af57292258ac1f4bba7a2b82b 100644
--- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs
+++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs
@@ -422,30 +422,22 @@ mod tests {
 
 		let res = BridgeHubKusamaBridgeHubPolkadotHeadersAndMessages::from_iter(vec![
 			"bridge-hub-kusama-bridge-hub-polkadot-headers-and-messages",
-			"--bridge-hub-kusama-host",
-			"bridge-hub-kusama-node-collator1",
-			"--bridge-hub-kusama-port",
-			"9944",
+			"--bridge-hub-kusama-uri",
+			"ws://bridge-hub-kusama-node-collator1:9944",
 			"--bridge-hub-kusama-signer",
 			"//Iden",
 			"--bridge-hub-kusama-transactions-mortality",
 			"64",
-			"--kusama-host",
-			"kusama-alice",
-			"--kusama-port",
-			"9944",
-			"--bridge-hub-polkadot-host",
-			"bridge-hub-polkadot-collator1",
-			"--bridge-hub-polkadot-port",
-			"9944",
+			"--kusama-uri",
+			"ws://kusama-alice:9944",
+			"--bridge-hub-polkadot-uri",
+			"ws://bridge-hub-polkadot-collator1:9944",
 			"--bridge-hub-polkadot-signer",
 			"//George",
 			"--bridge-hub-polkadot-transactions-mortality",
 			"64",
-			"--polkadot-host",
-			"polkadot-alice",
-			"--polkadot-port",
-			"9944",
+			"--polkadot-uri",
+			"ws://polkadot-alice:9944",
 			"--lane",
 			"0000000000000000000000000000000000000000000000000000000000000000",
 			"--prometheus-host",
@@ -467,11 +459,7 @@ mod tests {
 					},
 				},
 				left: BridgeHubKusamaConnectionParams {
-					bridge_hub_kusama_uri: None,
-					bridge_hub_kusama_host: "bridge-hub-kusama-node-collator1".into(),
-					bridge_hub_kusama_port: 9944,
-					bridge_hub_kusama_path: None,
-					bridge_hub_kusama_secure: false,
+					bridge_hub_kusama_uri: "ws://bridge-hub-kusama-node-collator1:9944".into(),
 					bridge_hub_kusama_runtime_version: BridgeHubKusamaRuntimeVersionParams {
 						bridge_hub_kusama_version_mode: RuntimeVersionType::Bundle,
 						bridge_hub_kusama_spec_version: None,
@@ -486,11 +474,7 @@ mod tests {
 					bridge_hub_kusama_transactions_mortality: Some(64),
 				},
 				left_relay: KusamaConnectionParams {
-					kusama_uri: None,
-					kusama_host: "kusama-alice".into(),
-					kusama_port: 9944,
-					kusama_path: None,
-					kusama_secure: false,
+					kusama_uri: "ws://kusama-alice:9944".into(),
 					kusama_runtime_version: KusamaRuntimeVersionParams {
 						kusama_version_mode: RuntimeVersionType::Bundle,
 						kusama_spec_version: None,
@@ -498,11 +482,7 @@ mod tests {
 					},
 				},
 				right: BridgeHubPolkadotConnectionParams {
-					bridge_hub_polkadot_uri: None,
-					bridge_hub_polkadot_host: "bridge-hub-polkadot-collator1".into(),
-					bridge_hub_polkadot_port: 9944,
-					bridge_hub_polkadot_path: None,
-					bridge_hub_polkadot_secure: false,
+					bridge_hub_polkadot_uri: "ws://bridge-hub-polkadot-collator1:9944".into(),
 					bridge_hub_polkadot_runtime_version: BridgeHubPolkadotRuntimeVersionParams {
 						bridge_hub_polkadot_version_mode: RuntimeVersionType::Bundle,
 						bridge_hub_polkadot_spec_version: None,
@@ -517,11 +497,7 @@ mod tests {
 					bridge_hub_polkadot_transactions_mortality: Some(64),
 				},
 				right_relay: PolkadotConnectionParams {
-					polkadot_uri: None,
-					polkadot_host: "polkadot-alice".into(),
-					polkadot_port: 9944,
-					polkadot_path: None,
-					polkadot_secure: false,
+					polkadot_uri: "ws://polkadot-alice:9944".into(),
 					polkadot_runtime_version: PolkadotRuntimeVersionParams {
 						polkadot_version_mode: RuntimeVersionType::Bundle,
 						polkadot_spec_version: None,
diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
index 321f4d9f26d0bc3c2f3008ac3aee4f75a45b053e..b146c28ff5585d6889e7252ef9cb5c9d19e92ea1 100755
--- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
+++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
@@ -131,13 +131,11 @@ function init_ro_wnd() {
 
     RUST_LOG=runtime=trace,rpc=trace,bridge=trace \
         $relayer_path init-bridge rococo-to-bridge-hub-westend \
-	--source-host localhost \
-	--source-port 9942 \
-	--source-version-mode Auto \
-	--target-host localhost \
-	--target-port 8945 \
-	--target-version-mode Auto \
-	--target-signer //Bob
+        --source-uri ws://localhost:9942 \
+        --source-version-mode Auto \
+        --target-uri ws://localhost:8945  \
+        --target-version-mode Auto \
+        --target-signer //Bob
 }
 
 function init_wnd_ro() {
@@ -145,39 +143,13 @@ function init_wnd_ro() {
 
     RUST_LOG=runtime=trace,rpc=trace,bridge=trace \
         $relayer_path init-bridge westend-to-bridge-hub-rococo \
-        --source-host localhost \
-        --source-port 9945 \
+        --source-uri ws://localhost:9945  \
         --source-version-mode Auto \
-        --target-host localhost \
-        --target-port 8943 \
+        --target-uri ws://localhost:8943 \
         --target-version-mode Auto \
         --target-signer //Bob
 }
 
-function run_relay() {
-    local relayer_path=$(ensure_relayer)
-
-    RUST_LOG=runtime=trace,rpc=trace,bridge=trace \
-        $relayer_path relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
-        --rococo-host localhost \
-        --rococo-port 9942 \
-        --rococo-version-mode Auto \
-        --bridge-hub-rococo-host localhost \
-        --bridge-hub-rococo-port 8943 \
-        --bridge-hub-rococo-version-mode Auto \
-        --bridge-hub-rococo-signer //Charlie \
-        --bridge-hub-rococo-transactions-mortality 4 \
-        --westend-host localhost \
-        --westend-port 9945 \
-        --westend-version-mode Auto \
-        --bridge-hub-westend-host localhost \
-        --bridge-hub-westend-port 8945 \
-        --bridge-hub-westend-version-mode Auto \
-        --bridge-hub-westend-signer //Charlie \
-        --bridge-hub-westend-transactions-mortality 4 \
-        --lane "${LANE_ID}"
-}
-
 function run_finality_relay() {
     local relayer_path=$(ensure_relayer)
 
diff --git a/bridges/testing/scripts/start-relayer.sh b/bridges/testing/scripts/start-relayer.sh
deleted file mode 100755
index 38ea62fad524486c40cf88943c48a2e4df4b86e8..0000000000000000000000000000000000000000
--- a/bridges/testing/scripts/start-relayer.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-RELAY_LOG=`mktemp -p $TEST_FOLDER relay.XXXXX`
-
-pushd $POLKADOT_SDK_PATH/bridges/testing/environments/rococo-westend
-./bridges_rococo_westend.sh run-relay >$RELAY_LOG 2>&1&
-popd
diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs
index 6a97525c4f576f2a956f21be3a89807699423205..002baea02d60d74e47f1508c420aa90461eeb058 100644
--- a/cumulus/pallets/collator-selection/src/mock.rs
+++ b/cumulus/pallets/collator-selection/src/mock.rs
@@ -139,6 +139,7 @@ impl pallet_session::Config for Test {
 	type SessionManager = CollatorSelection;
 	type SessionHandler = TestSessionHandler;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
index f718e7e77f597723c2a53dac3552bb103bab96d9..05c7021d380aeb0eaec892dd00cecc102c836f02 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
@@ -37,6 +37,7 @@ pallet-bridge-messages = { workspace = true }
 # Cumulus
 asset-hub-westend-runtime = { workspace = true }
 bridge-hub-westend-runtime = { workspace = true }
+cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 emulated-integration-tests-common = { workspace = true }
 parachains-common = { workspace = true, default-features = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
index 3d4d4f58e3b54f2baf8fc9dcf5683e9b3be4532f..cd5e22372f0e645469cdee8c8fb47ab901456ea4 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
@@ -61,8 +61,10 @@ mod imports {
 				LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub,
 				UniversalLocation as PenpalUniversalLocation,
 			},
-			PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet,
+			PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner,
+			PenpalBParaPallet as PenpalBPallet,
 		},
+		rococo_emulated_chain::RococoRelayPallet as RococoPallet,
 		westend_emulated_chain::{
 			genesis::ED as WESTEND_ED, westend_runtime::xcm_config::XcmConfig as WestendXcmConfig,
 			WestendRelayPallet as WestendPallet,
@@ -73,10 +75,11 @@ mod imports {
 		AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo,
 		BridgeHubWestendPara as BridgeHubWestend,
 		BridgeHubWestendParaReceiver as BridgeHubWestendReceiver,
-		BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB,
+		BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalAPara as PenpalA,
+		PenpalAParaReceiver as PenpalAReceiver, PenpalBPara as PenpalB,
 		PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender,
-		WestendRelay as Westend, WestendRelayReceiver as WestendReceiver,
-		WestendRelaySender as WestendSender,
+		RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, WestendRelay as Westend,
+		WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender,
 	};
 
 	pub const ASSET_ID: u32 = 1;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
index a73c1280b406a7154c05707a4dfe53946ac9c1c9..6da4de550fb5f5dcf5b949ccd5297fd4e2897f01 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
@@ -14,6 +14,7 @@
 // limitations under the License.
 
 use crate::{create_pool_with_native_on, tests::*};
+use emulated_integration_tests_common::macros::Dmp;
 use xcm::latest::AssetTransferFilter;
 
 fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
@@ -41,6 +42,12 @@ fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr(
 	let wnd_at_westend_parachains = wnd_at_ah_westend();
 	let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo();
 	create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true);
+	create_pool_with_native_on!(
+		AssetHubRococo,
+		wnd_at_asset_hub_rococo.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
 
 	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
 	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
@@ -416,6 +423,295 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo()
 	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
 }
 
+#[test]
+fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo() {
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = PenpalAReceiver::get();
+	let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+	// create foreign WND on remote paras
+	let (wnd_at_westend_parachains, wnd_at_rococo_parachains) =
+		set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount);
+	let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE);
+	// create foreign WND on remote paras
+	PenpalA::force_create_foreign_asset(
+		wnd_at_rococo_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![],
+	);
+	// Configure destination Penpal chain to trust its sibling AH as reserve of bridged WND
+	PenpalA::execute_with(|| {
+		assert_ok!(<PenpalA as Chain>::System::set_storage(
+			<PenpalA as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				wnd_at_rococo_parachains.encode(),
+			)],
+		));
+	});
+	create_pool_with_native_on!(PenpalA, wnd_at_rococo_parachains.clone(), true, asset_owner);
+
+	let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(ROCOCO_GENESIS_HASH),
+		AssetHubRococo::para_id(),
+	);
+	let wnds_in_reserve_on_ahw_before =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+	let sender_wnds_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(wnd_at_westend_parachains.clone(), &sender)
+	});
+	let receiver_wnds_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Send WNDs over bridge
+	{
+		let destination = asset_hub_rococo_location();
+		let assets: Assets = (wnd_at_westend_parachains.clone(), amount).into();
+		let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into());
+		let fees_id: AssetId = wnd_at_westend_parachains.clone().into();
+		let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into());
+		let remote_fees = (bridged_wnd_at_ah_rococo(), amount / 2).into();
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		let custom_xcm_on_penpal_dest = Xcm::<()>(vec![
+			BuyExecution { fees: remote_fees, weight_limit: Unlimited },
+			DepositAsset { assets: Wild(AllCounted(assets.len() as u32)), beneficiary },
+		]);
+		let pp_loc_from_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id());
+		let custom_xcm_on_remote_ah = Xcm::<()>(vec![
+			// BuyExecution { fees: remote_fees, weight_limit: Unlimited },
+			DepositReserveAsset {
+				assets: Wild(AllCounted(1)),
+				dest: pp_loc_from_ah,
+				xcm: custom_xcm_on_penpal_dest,
+			},
+		]);
+		send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah(
+			destination,
+			(assets, asset_transfer_type),
+			(fees_id, fees_transfer_type),
+			custom_xcm_on_remote_ah,
+		);
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// issue WNDs on AHR
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_wnds_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(wnd_at_westend_parachains, &sender)
+	});
+	let receiver_wnds_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains, &receiver)
+	});
+	let wnds_in_reserve_on_ahw_after =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+
+	// Sender's balance is reduced
+	assert!(sender_wnds_after < sender_wnds_before);
+	// Receiver's balance is increased
+	assert!(receiver_wnds_after > receiver_wnds_before);
+	// Reserve balance is increased by sent amount (less fees)
+	assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before);
+	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
+}
+
+#[test]
+fn send_wnds_from_westend_relay_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo() {
+	let amount = WESTEND_ED * 1_000;
+	let sender = WestendSender::get();
+	let receiver = PenpalAReceiver::get();
+	let local_asset_hub = Westend::child_location_of(AssetHubWestend::para_id());
+
+	let wnd_at_westend_parachains = wnd_at_ah_westend();
+	let wnd_at_rococo_parachains = bridged_wnd_at_ah_rococo();
+	// create foreign WND on AH Rococo
+	create_foreign_on_ah_rococo(wnd_at_rococo_parachains.clone(), true);
+	create_pool_with_native_on!(
+		AssetHubRococo,
+		wnd_at_rococo_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	// create foreign WND on Penpal Rococo
+	let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE);
+	PenpalA::force_create_foreign_asset(
+		wnd_at_rococo_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![],
+	);
+	// Configure destination Penpal chain to trust its sibling AH as reserve of bridged WND
+	PenpalA::execute_with(|| {
+		assert_ok!(<PenpalA as Chain>::System::set_storage(
+			<PenpalA as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				wnd_at_rococo_parachains.encode(),
+			)],
+		));
+	});
+	create_pool_with_native_on!(PenpalA, wnd_at_rococo_parachains.clone(), true, asset_owner);
+
+	Westend::execute_with(|| {
+		let root_origin = <Westend as Chain>::RuntimeOrigin::root();
+		<Westend as WestendPallet>::XcmPallet::force_xcm_version(
+			root_origin,
+			bx!(local_asset_hub.clone()),
+			XCM_VERSION,
+		)
+	})
+	.unwrap();
+	AssetHubRococo::force_xcm_version(
+		AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+		XCM_VERSION,
+	);
+
+	let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(ROCOCO_GENESIS_HASH),
+		AssetHubRococo::para_id(),
+	);
+	let wnds_in_reserve_on_ahw_before =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+	let sender_wnds_before = <Westend as Chain>::account_data_of(sender.clone()).free;
+	let receiver_wnds_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Send WNDs from Westend to AHW over bridge to AHR then onto Penpal parachain
+	{
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Westend Relay
+		let kusama_xcm = Xcm::<()>(vec![
+			WithdrawAsset((Location::here(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateTeleport {
+				assets: Wild(AllCounted(1)),
+				dest: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (wnd_at_westend_parachains, amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					DepositReserveAsset {
+						assets: Wild(AllCounted(1)),
+						dest: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (wnd_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							DepositReserveAsset {
+								assets: Wild(AllCounted(1)),
+								dest: AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+								// executes on Rococo Penpal
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (wnd_at_rococo_parachains.clone(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(Westend::execute_with(|| {
+				Dmp::<<Westend as Chain>::Runtime>::make_parachain_reachable(
+					AssetHubWestend::para_id(),
+				);
+				let signed_origin = <Westend as Chain>::RuntimeOrigin::signed(WestendSender::get());
+				<Westend as WestendPallet>::XcmPallet::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(kusama_xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount deposited in AHR's sovereign account
+						RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => {
+							who: *who == sov_ahr_on_ahw.clone().into(),
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// issue WNDs on AHR
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_wnds_after = <Westend as Chain>::account_data_of(sender.clone()).free;
+	let receiver_wnds_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains, &receiver)
+	});
+	let wnds_in_reserve_on_ahw_after =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+
+	// Sender's balance is reduced
+	assert!(sender_wnds_after < sender_wnds_before);
+	// Receiver's balance is increased
+	assert!(receiver_wnds_after > receiver_wnds_before);
+	// Reserve balance is increased by sent amount (less fees)
+	assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before);
+	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
+}
+
 #[test]
 fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() {
 	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
@@ -429,8 +725,8 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 
 	// set up ROCs for transfer
 	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
-	let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location);
-	let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)];
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw, amount * 2)];
 	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
 	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
 	PenpalB::force_create_foreign_asset(
@@ -543,6 +839,372 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
 }
 
+#[test]
+fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo(
+) {
+	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
+	let roc_at_rococo_parachains = Location::parent();
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = PenpalAReceiver::get();
+
+	// set up ROCs for transfer
+	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw.clone(), amount * 2)];
+	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
+	create_pool_with_native_on!(
+		AssetHubWestend,
+		roc_at_westend_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+	// Fund WNDs on Westend Penpal
+	PenpalB::mint_foreign_asset(
+		<PenpalB as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()),
+		Location::parent(),
+		sender.clone(),
+		amount,
+	);
+	// Create and fund bridged ROCs on Westend Penpal
+	PenpalB::force_create_foreign_asset(
+		roc_at_westend_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![(sender.clone(), amount * 2)],
+	);
+	// Configure source Penpal chain to trust local AH as reserve of bridged ROC
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as Chain>::System::set_storage(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				roc_at_westend_parachains.encode(),
+			)],
+		));
+	});
+
+	// fund the AHW's SA on AHR with the ROC tokens held in reserve
+	let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(WESTEND_GENESIS_HASH),
+		AssetHubWestend::para_id(),
+	);
+	AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]);
+
+	// balances before
+	let sender_rocs_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.clone().into(), &sender)
+	});
+	let receiver_rocs_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(roc_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// send ROCs over the bridge, all fees paid with ROC along the way
+	{
+		let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Penpal Westend
+		let xcm = Xcm::<()>(vec![
+			WithdrawAsset((roc_at_westend_parachains.clone(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateReserveWithdraw {
+				assets: Wild(AllCounted(1)),
+				reserve: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (roc_at_westend_parachains.clone(), amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					InitiateReserveWithdraw {
+						assets: Wild(AllCounted(1)),
+						reserve: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							DepositReserveAsset {
+								assets: Wild(AllCounted(1)),
+								dest: AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+								// executes on Rococo Penpal
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(PenpalB::execute_with(|| {
+				let signed_origin = <PenpalB as Chain>::RuntimeOrigin::signed(sender.clone());
+				<PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount to reserve transfer is withdrawn from Penpal's sovereign account
+						RuntimeEvent::ForeignAssets(
+							pallet_assets::Event::Burned { asset_id, owner, .. }
+						) => {
+							asset_id: asset_id == &roc_at_westend_parachains,
+							owner: owner == &sov_penpal_on_ahw,
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+						// message processed successfully
+						RuntimeEvent::MessageQueue(
+							pallet_message_queue::Event::Processed { success: true, .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// burn ROCs from AHW's SA on AHR
+				RuntimeEvent::Balances(
+					pallet_balances::Event::Burned { who, .. }
+				) => {
+					who: *who == sov_ahw_on_ahr.clone().into(),
+				},
+				// sent message to sibling Penpal
+				RuntimeEvent::XcmpQueue(
+					cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+				) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_rocs_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.into(), &sender)
+	});
+	let receiver_rocs_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(roc_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Sender's balance is reduced by sent "amount"
+	assert_eq!(sender_rocs_after, sender_rocs_before - amount);
+	// Receiver's balance is increased by no more than "amount"
+	assert!(receiver_rocs_after > receiver_rocs_before);
+	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
+}
+
+#[test]
+fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_rococo_relay(
+) {
+	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
+	let roc_at_rococo_parachains = Location::parent();
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = RococoReceiver::get();
+
+	// set up ROCs for transfer
+	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw.clone(), amount * 2)];
+	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
+	create_pool_with_native_on!(
+		AssetHubWestend,
+		roc_at_westend_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+	// Fund WNDs on Westend Penpal
+	PenpalB::mint_foreign_asset(
+		<PenpalB as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()),
+		Location::parent(),
+		sender.clone(),
+		amount,
+	);
+	// Create and fund bridged ROCs on Westend Penpal
+	PenpalB::force_create_foreign_asset(
+		roc_at_westend_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![(sender.clone(), amount * 2)],
+	);
+	// Configure source Penpal chain to trust local AH as reserve of bridged ROC
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as Chain>::System::set_storage(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				roc_at_westend_parachains.encode(),
+			)],
+		));
+	});
+
+	// fund the AHW's SA on AHR with the ROC tokens held in reserve
+	let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(WESTEND_GENESIS_HASH),
+		AssetHubWestend::para_id(),
+	);
+	AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]);
+
+	// fund Rococo Relay check account so we can teleport back to it
+	Rococo::fund_accounts(vec![(<Rococo as RococoPallet>::XcmPallet::check_account(), amount)]);
+
+	// balances before
+	let sender_rocs_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.clone().into(), &sender)
+	});
+	let receiver_rocs_before = <Rococo as Chain>::account_data_of(receiver.clone()).free;
+
+	// send ROCs over the bridge, all fees paid with ROC along the way
+	{
+		let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Penpal Westend
+		let xcm = Xcm::<()>(vec![
+			WithdrawAsset((roc_at_westend_parachains.clone(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateReserveWithdraw {
+				assets: Wild(AllCounted(1)),
+				reserve: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (roc_at_westend_parachains.clone(), amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					InitiateReserveWithdraw {
+						assets: Wild(AllCounted(1)),
+						reserve: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							InitiateTeleport {
+								assets: Wild(AllCounted(1)),
+								dest: Location::parent(),
+								// executes on Rococo Relay
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (Location::here(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(PenpalB::execute_with(|| {
+				let signed_origin = <PenpalB as Chain>::RuntimeOrigin::signed(sender.clone());
+				<PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount to reserve transfer is withdrawn from Penpal's sovereign account
+						RuntimeEvent::ForeignAssets(
+							pallet_assets::Event::Burned { asset_id, owner, .. }
+						) => {
+							asset_id: asset_id == &roc_at_westend_parachains,
+							owner: owner == &sov_penpal_on_ahw,
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+						// message processed successfully
+						RuntimeEvent::MessageQueue(
+							pallet_message_queue::Event::Processed { success: true, .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// burn ROCs from AHW's SA on AHR
+				RuntimeEvent::Balances(
+					pallet_balances::Event::Burned { who, .. }
+				) => {
+					who: *who == sov_ahw_on_ahr.clone().into(),
+				},
+				// sent message to Rococo Relay
+				RuntimeEvent::ParachainSystem(
+					cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }
+				) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+
+	let sender_rocs_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.into(), &sender)
+	});
+	let receiver_rocs_after = <Rococo as Chain>::account_data_of(receiver.clone()).free;
+
+	// Sender's balance is reduced by sent "amount"
+	assert_eq!(sender_rocs_after, sender_rocs_before - amount);
+	// Receiver's balance is increased by no more than "amount"
+	assert!(receiver_rocs_after > receiver_rocs_before);
+	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
+}
+
 #[test]
 fn dry_run_transfer_to_rococo_sends_xcm_to_bridge_hub() {
 	test_dry_run_transfer_across_pk_bridge!(
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 6e27aaf88b4bc7d86eab75230f6dcb4651d9a2a7..15c1a822b756c211508cc961bed835d73366dce8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -832,6 +832,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1158,6 +1159,10 @@ pub type Migrations = (
 		Runtime,
 		TrustBackedAssetsInstance,
 	>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs
index 80afbde1b1e0788948ef69bfb0f1238bdde7904b..d70c1947c4e34f3400377cbd452136dd888b2c8c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 1_745
 			.saturating_add(Weight::from_parts(6_562_902, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 750c1632f7193a356146747066ed4811829b0aef..3ffa0c19b9461f099664f5cf16a9a425cfea12d5 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -884,6 +884,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1255,6 +1256,10 @@ pub type Migrations = (
 		Runtime,
 		TrustBackedAssetsInstance,
 	>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs
index ef6d9fb4ba945f46689722d363d4b5ec42dad3bc..240779520a0b28b669feaaf1d83936a54afe71e4 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_utility.rs
@@ -98,6 +98,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 3_765
 			.saturating_add(Weight::from_parts(6_028_416, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index c940af694b220ad31ad198cd59ee650291b55dd9..140056e724a79836c3a20594735bcc356fdd2698 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -182,6 +182,10 @@ pub type Migrations = (
 		RocksDbWeight,
 	>,
 	pallet_bridge_relayers::migration::v1::MigrationToV1<Runtime, ()>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -494,6 +498,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs
index 4e531593f4d532fa8b60076e5cce4f7b0c5deb90..0c5a7cf0aeb34a9dba013daadd1ddb00e312af9e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_utility.rs
@@ -98,6 +98,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 1_601
 			.saturating_add(Weight::from_parts(5_138_293, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 01f72fd055a257ae52426bc7ab375fc646a1db8f..83712df8295b78c1c3f9de628c50c8ddc33be698 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -169,6 +169,10 @@ pub type Migrations = (
 		ConstU32<ASSET_HUB_ID>,
 	>,
 	bridge_to_ethereum_config::migrations::MigrationForXcmV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -475,6 +479,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs
index 5d05e89e45932b3e1044d9afe287bf35e6d64632..b81d217f5b0103aa71ba888c6b94dfc6b8ebb522 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 1_601
 			.saturating_add(Weight::from_parts(5_138_293, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+	
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs
index d4a2d3bbf1c7becba8ea6ed560496a0a8114b185..aa1f67d18afe4f6a6c228bd80b234eeea59fd075 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs
@@ -17,7 +17,8 @@
 
 use super::Origin;
 use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS};
-use sp_runtime::Perbill;
+use sp_runtime::{str_array as s, Perbill};
+use sp_std::borrow::Cow;
 
 /// Referendum `TrackId` type.
 pub type TrackId = u16;
@@ -46,13 +47,15 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
 
-	/// Return the array of available tracks and their information.
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
-		static DATA: [(TrackId, pallet_referenda::TrackInfo<Balance, BlockNumber>); 9] = [
-			(
-				constants::AMBASSADOR_TIER_1,
-				pallet_referenda::TrackInfo {
-					name: "ambassador tier 1",
+	/// Return the list of available tracks and their information.
+	fn tracks(
+	) -> impl Iterator<Item = Cow<'static, pallet_referenda::Track<Self::Id, Balance, BlockNumber>>>
+	{
+		static DATA: [pallet_referenda::Track<TrackId, Balance, BlockNumber>; 9] = [
+			pallet_referenda::Track {
+				id: constants::AMBASSADOR_TIER_1,
+				info: pallet_referenda::TrackInfo {
+					name: s("ambassador tier 1"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -70,11 +73,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::AMBASSADOR_TIER_2,
-				pallet_referenda::TrackInfo {
-					name: "ambassador tier 2",
+			},
+			pallet_referenda::Track {
+				id: constants::AMBASSADOR_TIER_2,
+				info: pallet_referenda::TrackInfo {
+					name: s("ambassador tier 2"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -92,11 +95,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::SENIOR_AMBASSADOR_TIER_3,
-				pallet_referenda::TrackInfo {
-					name: "senior ambassador tier 3",
+			},
+			pallet_referenda::Track {
+				id: constants::SENIOR_AMBASSADOR_TIER_3,
+				info: pallet_referenda::TrackInfo {
+					name: s("senior ambassador tier 3"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -114,11 +117,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::SENIOR_AMBASSADOR_TIER_4,
-				pallet_referenda::TrackInfo {
-					name: "senior ambassador tier 4",
+			},
+			pallet_referenda::Track {
+				id: constants::SENIOR_AMBASSADOR_TIER_4,
+				info: pallet_referenda::TrackInfo {
+					name: s("senior ambassador tier 4"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -136,11 +139,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::HEAD_AMBASSADOR_TIER_5,
-				pallet_referenda::TrackInfo {
-					name: "head ambassador tier 5",
+			},
+			pallet_referenda::Track {
+				id: constants::HEAD_AMBASSADOR_TIER_5,
+				info: pallet_referenda::TrackInfo {
+					name: s("head ambassador tier 5"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -158,11 +161,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::HEAD_AMBASSADOR_TIER_6,
-				pallet_referenda::TrackInfo {
-					name: "head ambassador tier 6",
+			},
+			pallet_referenda::Track {
+				id: constants::HEAD_AMBASSADOR_TIER_6,
+				info: pallet_referenda::TrackInfo {
+					name: s("head ambassador tier 6"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -180,11 +183,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::HEAD_AMBASSADOR_TIER_7,
-				pallet_referenda::TrackInfo {
-					name: "head ambassador tier 7",
+			},
+			pallet_referenda::Track {
+				id: constants::HEAD_AMBASSADOR_TIER_7,
+				info: pallet_referenda::TrackInfo {
+					name: s("head ambassador tier 7"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -202,11 +205,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::MASTER_AMBASSADOR_TIER_8,
-				pallet_referenda::TrackInfo {
-					name: "master ambassador tier 8",
+			},
+			pallet_referenda::Track {
+				id: constants::MASTER_AMBASSADOR_TIER_8,
+				info: pallet_referenda::TrackInfo {
+					name: s("master ambassador tier 8"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -224,11 +227,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				constants::MASTER_AMBASSADOR_TIER_9,
-				pallet_referenda::TrackInfo {
-					name: "master ambassador tier 9",
+			},
+			pallet_referenda::Track {
+				id: constants::MASTER_AMBASSADOR_TIER_9,
+				info: pallet_referenda::TrackInfo {
+					name: s("master ambassador tier 9"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 24 * HOURS,
@@ -246,9 +249,9 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
+			},
 		];
-		&DATA[..]
+		DATA.iter().map(Cow::Borrowed)
 	}
 
 	/// Determine the voting track for the given `origin`.
@@ -277,6 +280,3 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 		}
 	}
 }
-
-// implements [`frame_support::traits::Get`] for [`TracksInfo`]
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs
index d7959f7724488edbbf3447447b1167b14284e271..6c3c88935dd222b89a63c822efe06a6a473949c7 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs
@@ -18,7 +18,8 @@
 
 use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS, MINUTES};
 use pallet_ranked_collective::Rank;
-use sp_runtime::{traits::Convert, Perbill};
+use sp_runtime::{str_array as s, traits::Convert, Perbill};
+use sp_std::borrow::Cow;
 
 /// Referendum `TrackId` type.
 pub type TrackId = u16;
@@ -114,13 +115,16 @@ pub struct TracksInfo;
 impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 	type Id = TrackId;
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
+
+	fn tracks(
+	) -> impl Iterator<Item = Cow<'static, pallet_referenda::Track<Self::Id, Balance, BlockNumber>>>
+	{
 		use constants as tracks;
-		static DATA: [(TrackId, pallet_referenda::TrackInfo<Balance, BlockNumber>); 21] = [
-			(
-				tracks::MEMBERS,
-				pallet_referenda::TrackInfo {
-					name: "members",
+		static DATA: [pallet_referenda::Track<TrackId, Balance, BlockNumber>; 21] = [
+			pallet_referenda::Track {
+				id: tracks::MEMBERS,
+				info: pallet_referenda::TrackInfo {
+					name: s("members"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -138,11 +142,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::PROFICIENTS,
-				pallet_referenda::TrackInfo {
-					name: "proficient members",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROFICIENTS,
+				info: pallet_referenda::TrackInfo {
+					name: s("proficient members"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -160,11 +164,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::FELLOWS,
-				pallet_referenda::TrackInfo {
-					name: "fellows",
+			},
+			pallet_referenda::Track {
+				id: tracks::FELLOWS,
+				info: pallet_referenda::TrackInfo {
+					name: s("fellows"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -182,11 +186,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::ARCHITECTS,
-				pallet_referenda::TrackInfo {
-					name: "architects",
+			},
+			pallet_referenda::Track {
+				id: tracks::ARCHITECTS,
+				info: pallet_referenda::TrackInfo {
+					name: s("architects"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -204,11 +208,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::ARCHITECTS_ADEPT,
-				pallet_referenda::TrackInfo {
-					name: "architects adept",
+			},
+			pallet_referenda::Track {
+				id: tracks::ARCHITECTS_ADEPT,
+				info: pallet_referenda::TrackInfo {
+					name: s("architects adept"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -226,11 +230,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::GRAND_ARCHITECTS,
-				pallet_referenda::TrackInfo {
-					name: "grand architects",
+			},
+			pallet_referenda::Track {
+				id: tracks::GRAND_ARCHITECTS,
+				info: pallet_referenda::TrackInfo {
+					name: s("grand architects"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -248,11 +252,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::MASTERS,
-				pallet_referenda::TrackInfo {
-					name: "masters",
+			},
+			pallet_referenda::Track {
+				id: tracks::MASTERS,
+				info: pallet_referenda::TrackInfo {
+					name: s("masters"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -270,11 +274,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::MASTERS_CONSTANT,
-				pallet_referenda::TrackInfo {
-					name: "masters constant",
+			},
+			pallet_referenda::Track {
+				id: tracks::MASTERS_CONSTANT,
+				info: pallet_referenda::TrackInfo {
+					name: s("masters constant"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -292,11 +296,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::GRAND_MASTERS,
-				pallet_referenda::TrackInfo {
-					name: "grand masters",
+			},
+			pallet_referenda::Track {
+				id: tracks::GRAND_MASTERS,
+				info: pallet_referenda::TrackInfo {
+					name: s("grand masters"),
 					max_deciding: 10,
 					decision_deposit: 5 * DOLLARS,
 					prepare_period: 30 * MINUTES,
@@ -314,11 +318,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				tracks::RETAIN_AT_1DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at I Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_1DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at I Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -328,11 +332,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::RETAIN_AT_2DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at II Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_2DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at II Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -342,11 +346,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::RETAIN_AT_3DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at III Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_3DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at III Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -356,11 +360,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::RETAIN_AT_4DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at IV Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_4DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at IV Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -370,11 +374,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::RETAIN_AT_5DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at V Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_5DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at V Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -384,11 +388,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::RETAIN_AT_6DAN,
-				pallet_referenda::TrackInfo {
-					name: "retain at VI Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::RETAIN_AT_6DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("retain at VI Dan"),
 					max_deciding: RETAIN_MAX_DECIDING,
 					decision_deposit: RETAIN_DECISION_DEPOSIT,
 					prepare_period: RETAIN_PREPARE_PERIOD,
@@ -398,11 +402,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: RETAIN_MIN_APPROVAL,
 					min_support: RETAIN_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_1DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to I Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_1DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to I Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -412,11 +416,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_2DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to II Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_2DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to II Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -426,11 +430,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_3DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to III Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_3DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to III Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -440,11 +444,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_4DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to IV Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_4DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to IV Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -454,11 +458,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_5DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to V Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_5DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to V Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -468,11 +472,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
-			(
-				tracks::PROMOTE_TO_6DAN,
-				pallet_referenda::TrackInfo {
-					name: "promote to VI Dan",
+			},
+			pallet_referenda::Track {
+				id: tracks::PROMOTE_TO_6DAN,
+				info: pallet_referenda::TrackInfo {
+					name: s("promote to VI Dan"),
 					max_deciding: PROMOTE_MAX_DECIDING,
 					decision_deposit: PROMOTE_DECISION_DEPOSIT,
 					prepare_period: PROMOTE_PREPARE_PERIOD,
@@ -482,9 +486,9 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 					min_approval: PROMOTE_MIN_APPROVAL,
 					min_support: PROMOTE_MIN_SUPPORT,
 				},
-			),
+			},
 		];
-		&DATA[..]
+		DATA.iter().map(Cow::Borrowed)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 		use super::origins::Origin;
@@ -529,4 +533,3 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 		}
 	}
 }
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index c662cd355c73b68740837dd59042dd849be5982c..65f3e27ae9a711ce8f39f7a9b5adad9b802d12b4 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -502,6 +502,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -772,6 +773,10 @@ type Migrations = (
 	// unreleased
 	pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, AmbassadorCoreInstance>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs
index 6887e41099e350b07f941a42cd93705eb2abf732..d959b11649bbbdf0e06dd51c01e63c1f5a688bb8 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs
@@ -98,6 +98,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 1_395
 			.saturating_add(Weight::from_parts(5_000_971, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 48c5859715f4bbf7079ec55547dca6b67be67f22..165e60361cd187f36f144b37ca71f2e4e3397762 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -116,6 +116,10 @@ pub type Migrations = (
 	// unreleased
 	cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>,
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -362,6 +366,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index ccf8003639acddbe78d945a11fef7e086d4f4d92..b477bb4850d1c60e1550c915bee675da480d1e21 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -127,6 +127,10 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV1ToV2<Runtime>,
 	pallet_broker::migration::MigrateV2ToV3<Runtime>,
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -397,6 +401,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs
index 2619a4180bab7d310e24c70e02ccc56bfe72003d..1bfac221c2c8f6126cf46e71950fdcaf613f501e 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 1_621
 			.saturating_add(Weight::from_parts(3_312_302, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 3d544aea469f3844a58db2bf0857f8945cb6351e..964351575de838e42668c64a6733d4e59bbea6c9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -127,6 +127,10 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV1ToV2<Runtime>,
 	pallet_broker::migration::MigrateV2ToV3<Runtime>,
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -398,6 +402,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs
index f2c40f33711859feb6042c0dd15f5328fc3d4641..7ca8e00c2a7ddbed1b0e6b7a82b48ac82edd8cff 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 740
 			.saturating_add(Weight::from_parts(2_800_888, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index 68c51175415c507b3449314713c9324c83709d4e..3766626ba4f632cd8603b51583f41a2782b207bd 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -114,6 +114,10 @@ pub type UncheckedExtrinsic =
 pub type Migrations = (
 	pallet_collator_selection::migration::v2::MigrationToV2<Runtime>,
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -359,6 +363,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_utility.rs
index f30f0776952625dd9c9fa6f5dff2894ab71aa591..0871b257d39c308772e40fe604b61a557bc1d0a8 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_utility.rs
@@ -96,6 +96,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 3_915
 			.saturating_add(Weight::from_parts(4_372_646, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index 980fb8db473204cd1a41adc55310f59b5dc1529a..34ab85f3d0cdd81d593d24a96ac4a3bbb4c37144 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -113,6 +113,10 @@ pub type UncheckedExtrinsic =
 /// Migrations to apply on runtime upgrade.
 pub type Migrations = (
 	pallet_collator_selection::migration::v2::MigrationToV2<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -358,6 +362,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_utility.rs
index c7f98f70fdd84bd88565bb9c133db6c2c36f7b4c..d8def37891d4dfe4f41d6a2f71b5ef0afd5a8e1c 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_utility.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_utility.rs
@@ -96,6 +96,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 7_605
 			.saturating_add(Weight::from_parts(4_306_193, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index ed6e014417d457ba0b51819427ac94db557574e0..dfccf3ec9860db1c50d88ca78986010f0c805a39 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -162,6 +162,10 @@ pub type UncheckedExtrinsic =
 pub type Migrations = (
 	pallet_balances::migration::MigrateToTrackInactive<Runtime, xcm_config::CheckingAccount>,
 	pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 );
 
 /// Executive: handles dispatch to the various modules.
@@ -730,6 +734,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs
index 3c782e4793bab422a0ed06e8705ad58457910414..90fa8639f75776042b2f176bebab039f456d4ca5 100644
--- a/docs/sdk/src/guides/your_first_node.rs
+++ b/docs/sdk/src/guides/your_first_node.rs
@@ -305,4 +305,33 @@ mod tests {
 			test_runtime_preset(FIRST_RUNTIME, 1000, preset);
 		});
 	}
+
+	#[test]
+	fn omni_node_dev_mode_works() {
+		// Omni Node in dev mode works with the parachain template's `dev_chain_spec`.
+		let dev_chain_spec = std::env::current_dir()
+			.unwrap()
+			.parent()
+			.unwrap()
+			.parent()
+			.unwrap()
+			.join("templates")
+			.join("parachain")
+			.join("dev_chain_spec.json");
+
+		maybe_build_omni_node();
+		let omni_node = find_release_binary(OMNI_NODE).unwrap();
+
+		let output = Command::new(omni_node)
+			.arg("--dev")
+			.args(["--chain", dev_chain_spec.to_str().unwrap()])
+			.timeout(std::time::Duration::from_secs(70))
+			.output()
+			.unwrap();
+
+		// At least 7 blocks should be imported.
+		assert!(String::from_utf8(output.stderr)
+			.unwrap()
+			.contains(format!("Imported #{}", 7).to_string().as_str()));
+	}
 }
diff --git a/polkadot/runtime/common/src/claims/mod.rs b/polkadot/runtime/common/src/claims/mod.rs
index f48e40ee188789f91733cab5f6486fcc94760461..9e084688b4e558ec3917e6651e2a15204adaa4cd 100644
--- a/polkadot/runtime/common/src/claims/mod.rs
+++ b/polkadot/runtime/common/src/claims/mod.rs
@@ -130,7 +130,7 @@ impl Default for StatementKind {
 #[derive(
 	Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen,
 )]
-pub struct EthereumAddress([u8; 20]);
+pub struct EthereumAddress(pub [u8; 20]);
 
 impl Serialize for EthereumAddress {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -239,11 +239,11 @@ pub mod pallet {
 
 	/// The statement kind that must be signed, if any.
 	#[pallet::storage]
-	pub(super) type Signing<T> = StorageMap<_, Identity, EthereumAddress, StatementKind>;
+	pub type Signing<T> = StorageMap<_, Identity, EthereumAddress, StatementKind>;
 
 	/// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to.
 	#[pallet::storage]
-	pub(super) type Preclaims<T: Config> = StorageMap<_, Identity, T::AccountId, EthereumAddress>;
+	pub type Preclaims<T: Config> = StorageMap<_, Identity, T::AccountId, EthereumAddress>;
 
 	#[pallet::genesis_config]
 	#[derive(DefaultNoBound)]
diff --git a/polkadot/runtime/rococo/src/governance/fellowship.rs b/polkadot/runtime/rococo/src/governance/fellowship.rs
index 231defab6aa52419b42131a510637e0b1c04c889..cb194e05e9ce904a412edf8ec1c96ccd7ad31c06 100644
--- a/polkadot/runtime/rococo/src/governance/fellowship.rs
+++ b/polkadot/runtime/rococo/src/governance/fellowship.rs
@@ -16,8 +16,13 @@
 
 //! Elements of governance concerning the Rococo Fellowship.
 
+use alloc::borrow::Cow;
 use frame_support::traits::{MapSuccess, TryMapSuccess};
-use sp_runtime::traits::{CheckedReduceBy, ConstU16, Replace, ReplaceWithDefault};
+use pallet_referenda::{Track, TrackInfo};
+use sp_runtime::{
+	str_array as s,
+	traits::{CheckedReduceBy, ConstU16, Replace, ReplaceWithDefault},
+};
 
 use super::*;
 use crate::{CENTS, DAYS};
@@ -32,12 +37,13 @@ pub struct TracksInfo;
 impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 	type Id = u16;
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
-		static DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 10] = [
-			(
-				0u16,
-				pallet_referenda::TrackInfo {
-					name: "candidates",
+
+	fn tracks() -> impl Iterator<Item = Cow<'static, Track<Self::Id, Balance, BlockNumber>>> {
+		static DATA: [Track<u16, Balance, BlockNumber>; 10] = [
+			Track {
+				id: 0u16,
+				info: TrackInfo {
+					name: s("candidates"),
 					max_deciding: 10,
 					decision_deposit: 100 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -55,11 +61,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				1u16,
-				pallet_referenda::TrackInfo {
-					name: "members",
+			},
+			Track {
+				id: 1u16,
+				info: TrackInfo {
+					name: s("members"),
 					max_deciding: 10,
 					decision_deposit: 10 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -77,11 +83,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				2u16,
-				pallet_referenda::TrackInfo {
-					name: "proficients",
+			},
+			Track {
+				id: 2u16,
+				info: TrackInfo {
+					name: s("proficients"),
 					max_deciding: 10,
 					decision_deposit: 10 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -99,11 +105,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				3u16,
-				pallet_referenda::TrackInfo {
-					name: "fellows",
+			},
+			Track {
+				id: 3u16,
+				info: TrackInfo {
+					name: s("fellows"),
 					max_deciding: 10,
 					decision_deposit: 10 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -121,11 +127,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				4u16,
-				pallet_referenda::TrackInfo {
-					name: "senior fellows",
+			},
+			Track {
+				id: 4u16,
+				info: TrackInfo {
+					name: s("senior fellows"),
 					max_deciding: 10,
 					decision_deposit: 10 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -143,11 +149,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				5u16,
-				pallet_referenda::TrackInfo {
-					name: "experts",
+			},
+			Track {
+				id: 5u16,
+				info: TrackInfo {
+					name: s("experts"),
 					max_deciding: 10,
 					decision_deposit: 1 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -165,11 +171,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				6u16,
-				pallet_referenda::TrackInfo {
-					name: "senior experts",
+			},
+			Track {
+				id: 6u16,
+				info: TrackInfo {
+					name: s("senior experts"),
 					max_deciding: 10,
 					decision_deposit: 1 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -187,11 +193,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				7u16,
-				pallet_referenda::TrackInfo {
-					name: "masters",
+			},
+			Track {
+				id: 7u16,
+				info: TrackInfo {
+					name: s("masters"),
 					max_deciding: 10,
 					decision_deposit: 1 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -209,11 +215,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				8u16,
-				pallet_referenda::TrackInfo {
-					name: "senior masters",
+			},
+			Track {
+				id: 8u16,
+				info: TrackInfo {
+					name: s("senior masters"),
 					max_deciding: 10,
 					decision_deposit: 1 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -231,11 +237,11 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
-			(
-				9u16,
-				pallet_referenda::TrackInfo {
-					name: "grand masters",
+			},
+			Track {
+				id: 9u16,
+				info: TrackInfo {
+					name: s("grand masters"),
 					max_deciding: 10,
 					decision_deposit: 1 * 3 * CENTS,
 					prepare_period: 30 * MINUTES,
@@ -253,9 +259,9 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 						ceil: Perbill::from_percent(50),
 					},
 				},
-			),
+			},
 		];
-		&DATA[..]
+		DATA.iter().map(Cow::Borrowed)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 		use super::origins::Origin;
@@ -285,7 +291,6 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 		}
 	}
 }
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
 
 pub type FellowshipReferendaInstance = pallet_referenda::Instance2;
 
diff --git a/polkadot/runtime/rococo/src/governance/tracks.rs b/polkadot/runtime/rococo/src/governance/tracks.rs
index 3765569f183e0414a10fe2852e528ccc9dedc3d7..62229ff5ef5c9668bdc9a3e378dc6c8cb55bc820 100644
--- a/polkadot/runtime/rococo/src/governance/tracks.rs
+++ b/polkadot/runtime/rococo/src/governance/tracks.rs
@@ -18,6 +18,9 @@
 
 use super::*;
 
+use alloc::borrow::Cow;
+use sp_runtime::str_array as s;
+
 const fn percent(x: i32) -> sp_arithmetic::FixedI64 {
 	sp_arithmetic::FixedI64::from_rational(x as u128, 100)
 }
@@ -65,11 +68,11 @@ const APP_WHITELISTED_CALLER: Curve =
 const SUP_WHITELISTED_CALLER: Curve =
 	Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50));
 
-const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15] = [
-	(
-		0,
-		pallet_referenda::TrackInfo {
-			name: "root",
+const TRACKS_DATA: [pallet_referenda::Track<u16, Balance, BlockNumber>; 15] = [
+	pallet_referenda::Track {
+		id: 0,
+		info: pallet_referenda::TrackInfo {
+			name: s("root"),
 			max_deciding: 1,
 			decision_deposit: 100 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -79,11 +82,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_ROOT,
 			min_support: SUP_ROOT,
 		},
-	),
-	(
-		1,
-		pallet_referenda::TrackInfo {
-			name: "whitelisted_caller",
+	},
+	pallet_referenda::Track {
+		id: 1,
+		info: pallet_referenda::TrackInfo {
+			name: s("whitelisted_caller"),
 			max_deciding: 100,
 			decision_deposit: 10 * GRAND,
 			prepare_period: 6 * MINUTES,
@@ -93,11 +96,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_WHITELISTED_CALLER,
 			min_support: SUP_WHITELISTED_CALLER,
 		},
-	),
-	(
-		10,
-		pallet_referenda::TrackInfo {
-			name: "staking_admin",
+	},
+	pallet_referenda::Track {
+		id: 10,
+		info: pallet_referenda::TrackInfo {
+			name: s("staking_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -107,11 +110,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_STAKING_ADMIN,
 			min_support: SUP_STAKING_ADMIN,
 		},
-	),
-	(
-		11,
-		pallet_referenda::TrackInfo {
-			name: "treasurer",
+	},
+	pallet_referenda::Track {
+		id: 11,
+		info: pallet_referenda::TrackInfo {
+			name: s("treasurer"),
 			max_deciding: 10,
 			decision_deposit: 1 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -121,11 +124,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_TREASURER,
 			min_support: SUP_TREASURER,
 		},
-	),
-	(
-		12,
-		pallet_referenda::TrackInfo {
-			name: "lease_admin",
+	},
+	pallet_referenda::Track {
+		id: 12,
+		info: pallet_referenda::TrackInfo {
+			name: s("lease_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -135,11 +138,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_LEASE_ADMIN,
 			min_support: SUP_LEASE_ADMIN,
 		},
-	),
-	(
-		13,
-		pallet_referenda::TrackInfo {
-			name: "fellowship_admin",
+	},
+	pallet_referenda::Track {
+		id: 13,
+		info: pallet_referenda::TrackInfo {
+			name: s("fellowship_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -149,11 +152,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_FELLOWSHIP_ADMIN,
 			min_support: SUP_FELLOWSHIP_ADMIN,
 		},
-	),
-	(
-		14,
-		pallet_referenda::TrackInfo {
-			name: "general_admin",
+	},
+	pallet_referenda::Track {
+		id: 14,
+		info: pallet_referenda::TrackInfo {
+			name: s("general_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -163,11 +166,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_GENERAL_ADMIN,
 			min_support: SUP_GENERAL_ADMIN,
 		},
-	),
-	(
-		15,
-		pallet_referenda::TrackInfo {
-			name: "auction_admin",
+	},
+	pallet_referenda::Track {
+		id: 15,
+		info: pallet_referenda::TrackInfo {
+			name: s("auction_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -177,11 +180,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_AUCTION_ADMIN,
 			min_support: SUP_AUCTION_ADMIN,
 		},
-	),
-	(
-		20,
-		pallet_referenda::TrackInfo {
-			name: "referendum_canceller",
+	},
+	pallet_referenda::Track {
+		id: 20,
+		info: pallet_referenda::TrackInfo {
+			name: s("referendum_canceller"),
 			max_deciding: 1_000,
 			decision_deposit: 10 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -191,11 +194,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_REFERENDUM_CANCELLER,
 			min_support: SUP_REFERENDUM_CANCELLER,
 		},
-	),
-	(
-		21,
-		pallet_referenda::TrackInfo {
-			name: "referendum_killer",
+	},
+	pallet_referenda::Track {
+		id: 21,
+		info: pallet_referenda::TrackInfo {
+			name: s("referendum_killer"),
 			max_deciding: 1_000,
 			decision_deposit: 50 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -205,11 +208,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_REFERENDUM_KILLER,
 			min_support: SUP_REFERENDUM_KILLER,
 		},
-	),
-	(
-		30,
-		pallet_referenda::TrackInfo {
-			name: "small_tipper",
+	},
+	pallet_referenda::Track {
+		id: 30,
+		info: pallet_referenda::TrackInfo {
+			name: s("small_tipper"),
 			max_deciding: 200,
 			decision_deposit: 1 * 3 * CENTS,
 			prepare_period: 1 * MINUTES,
@@ -219,11 +222,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_SMALL_TIPPER,
 			min_support: SUP_SMALL_TIPPER,
 		},
-	),
-	(
-		31,
-		pallet_referenda::TrackInfo {
-			name: "big_tipper",
+	},
+	pallet_referenda::Track {
+		id: 31,
+		info: pallet_referenda::TrackInfo {
+			name: s("big_tipper"),
 			max_deciding: 100,
 			decision_deposit: 10 * 3 * CENTS,
 			prepare_period: 4 * MINUTES,
@@ -233,11 +236,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_BIG_TIPPER,
 			min_support: SUP_BIG_TIPPER,
 		},
-	),
-	(
-		32,
-		pallet_referenda::TrackInfo {
-			name: "small_spender",
+	},
+	pallet_referenda::Track {
+		id: 32,
+		info: pallet_referenda::TrackInfo {
+			name: s("small_spender"),
 			max_deciding: 50,
 			decision_deposit: 100 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -247,11 +250,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_SMALL_SPENDER,
 			min_support: SUP_SMALL_SPENDER,
 		},
-	),
-	(
-		33,
-		pallet_referenda::TrackInfo {
-			name: "medium_spender",
+	},
+	pallet_referenda::Track {
+		id: 33,
+		info: pallet_referenda::TrackInfo {
+			name: s("medium_spender"),
 			max_deciding: 50,
 			decision_deposit: 200 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -261,11 +264,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_MEDIUM_SPENDER,
 			min_support: SUP_MEDIUM_SPENDER,
 		},
-	),
-	(
-		34,
-		pallet_referenda::TrackInfo {
-			name: "big_spender",
+	},
+	pallet_referenda::Track {
+		id: 34,
+		info: pallet_referenda::TrackInfo {
+			name: s("big_spender"),
 			max_deciding: 50,
 			decision_deposit: 400 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -275,15 +278,18 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_BIG_SPENDER,
 			min_support: SUP_BIG_SPENDER,
 		},
-	),
+	},
 ];
 
 pub struct TracksInfo;
 impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 	type Id = u16;
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
-		&TRACKS_DATA[..]
+
+	fn tracks(
+	) -> impl Iterator<Item = Cow<'static, pallet_referenda::Track<Self::Id, Balance, BlockNumber>>>
+	{
+		TRACKS_DATA.iter().map(Cow::Borrowed)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 		if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) {
@@ -317,4 +323,3 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 		}
 	}
 }
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 054ec2aa4a931ff89d29d1f21112eb38f22edd61..61403c001e21013c1df3c37305015f29b2b8e32a 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -482,6 +482,7 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, ValidatorManager>;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1758,6 +1759,9 @@ pub mod migrations {
         parachains_configuration::migration::v12::MigrateToV12<Runtime>,
         parachains_on_demand::migration::MigrateV0ToV1<Runtime>,
 
+		// migrates session storage item
+		pallet_session::migrations::v1::MigrateV0ToV1<Runtime, pallet_session::migrations::v1::InitOffenceSeverity<Runtime>>,
+
         // permanent
         pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
         parachains_inclusion::migration::MigrateToV1<Runtime>,
diff --git a/polkadot/runtime/rococo/src/weights/pallet_utility.rs b/polkadot/runtime/rococo/src/weights/pallet_utility.rs
index 5e580de6aad54d75381d385f768e12395d65eea6..2b1db130801a437692d7db56884215f0c69d55e2 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_utility.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 460
 			.saturating_add(Weight::from_parts(3_173_577, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index c0985873532021b066afe587f60a32c2b82dcdd2..fc489e3bc685e1dd4666fa5d702cfee3313084e5 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -318,12 +318,13 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = Staking;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 pallet_staking_reward_curve::build! {
@@ -401,7 +402,6 @@ impl pallet_staking::Config for Runtime {
 	type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig;
 	type EventListeners = ();
 	type WeightInfo = ();
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxValidatorSet = MaxAuthorities;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
diff --git a/polkadot/runtime/westend/src/governance/tracks.rs b/polkadot/runtime/westend/src/governance/tracks.rs
index 3765569f183e0414a10fe2852e528ccc9dedc3d7..62229ff5ef5c9668bdc9a3e378dc6c8cb55bc820 100644
--- a/polkadot/runtime/westend/src/governance/tracks.rs
+++ b/polkadot/runtime/westend/src/governance/tracks.rs
@@ -18,6 +18,9 @@
 
 use super::*;
 
+use alloc::borrow::Cow;
+use sp_runtime::str_array as s;
+
 const fn percent(x: i32) -> sp_arithmetic::FixedI64 {
 	sp_arithmetic::FixedI64::from_rational(x as u128, 100)
 }
@@ -65,11 +68,11 @@ const APP_WHITELISTED_CALLER: Curve =
 const SUP_WHITELISTED_CALLER: Curve =
 	Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50));
 
-const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15] = [
-	(
-		0,
-		pallet_referenda::TrackInfo {
-			name: "root",
+const TRACKS_DATA: [pallet_referenda::Track<u16, Balance, BlockNumber>; 15] = [
+	pallet_referenda::Track {
+		id: 0,
+		info: pallet_referenda::TrackInfo {
+			name: s("root"),
 			max_deciding: 1,
 			decision_deposit: 100 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -79,11 +82,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_ROOT,
 			min_support: SUP_ROOT,
 		},
-	),
-	(
-		1,
-		pallet_referenda::TrackInfo {
-			name: "whitelisted_caller",
+	},
+	pallet_referenda::Track {
+		id: 1,
+		info: pallet_referenda::TrackInfo {
+			name: s("whitelisted_caller"),
 			max_deciding: 100,
 			decision_deposit: 10 * GRAND,
 			prepare_period: 6 * MINUTES,
@@ -93,11 +96,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_WHITELISTED_CALLER,
 			min_support: SUP_WHITELISTED_CALLER,
 		},
-	),
-	(
-		10,
-		pallet_referenda::TrackInfo {
-			name: "staking_admin",
+	},
+	pallet_referenda::Track {
+		id: 10,
+		info: pallet_referenda::TrackInfo {
+			name: s("staking_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -107,11 +110,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_STAKING_ADMIN,
 			min_support: SUP_STAKING_ADMIN,
 		},
-	),
-	(
-		11,
-		pallet_referenda::TrackInfo {
-			name: "treasurer",
+	},
+	pallet_referenda::Track {
+		id: 11,
+		info: pallet_referenda::TrackInfo {
+			name: s("treasurer"),
 			max_deciding: 10,
 			decision_deposit: 1 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -121,11 +124,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_TREASURER,
 			min_support: SUP_TREASURER,
 		},
-	),
-	(
-		12,
-		pallet_referenda::TrackInfo {
-			name: "lease_admin",
+	},
+	pallet_referenda::Track {
+		id: 12,
+		info: pallet_referenda::TrackInfo {
+			name: s("lease_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -135,11 +138,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_LEASE_ADMIN,
 			min_support: SUP_LEASE_ADMIN,
 		},
-	),
-	(
-		13,
-		pallet_referenda::TrackInfo {
-			name: "fellowship_admin",
+	},
+	pallet_referenda::Track {
+		id: 13,
+		info: pallet_referenda::TrackInfo {
+			name: s("fellowship_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -149,11 +152,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_FELLOWSHIP_ADMIN,
 			min_support: SUP_FELLOWSHIP_ADMIN,
 		},
-	),
-	(
-		14,
-		pallet_referenda::TrackInfo {
-			name: "general_admin",
+	},
+	pallet_referenda::Track {
+		id: 14,
+		info: pallet_referenda::TrackInfo {
+			name: s("general_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -163,11 +166,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_GENERAL_ADMIN,
 			min_support: SUP_GENERAL_ADMIN,
 		},
-	),
-	(
-		15,
-		pallet_referenda::TrackInfo {
-			name: "auction_admin",
+	},
+	pallet_referenda::Track {
+		id: 15,
+		info: pallet_referenda::TrackInfo {
+			name: s("auction_admin"),
 			max_deciding: 10,
 			decision_deposit: 5 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -177,11 +180,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_AUCTION_ADMIN,
 			min_support: SUP_AUCTION_ADMIN,
 		},
-	),
-	(
-		20,
-		pallet_referenda::TrackInfo {
-			name: "referendum_canceller",
+	},
+	pallet_referenda::Track {
+		id: 20,
+		info: pallet_referenda::TrackInfo {
+			name: s("referendum_canceller"),
 			max_deciding: 1_000,
 			decision_deposit: 10 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -191,11 +194,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_REFERENDUM_CANCELLER,
 			min_support: SUP_REFERENDUM_CANCELLER,
 		},
-	),
-	(
-		21,
-		pallet_referenda::TrackInfo {
-			name: "referendum_killer",
+	},
+	pallet_referenda::Track {
+		id: 21,
+		info: pallet_referenda::TrackInfo {
+			name: s("referendum_killer"),
 			max_deciding: 1_000,
 			decision_deposit: 50 * GRAND,
 			prepare_period: 8 * MINUTES,
@@ -205,11 +208,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_REFERENDUM_KILLER,
 			min_support: SUP_REFERENDUM_KILLER,
 		},
-	),
-	(
-		30,
-		pallet_referenda::TrackInfo {
-			name: "small_tipper",
+	},
+	pallet_referenda::Track {
+		id: 30,
+		info: pallet_referenda::TrackInfo {
+			name: s("small_tipper"),
 			max_deciding: 200,
 			decision_deposit: 1 * 3 * CENTS,
 			prepare_period: 1 * MINUTES,
@@ -219,11 +222,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_SMALL_TIPPER,
 			min_support: SUP_SMALL_TIPPER,
 		},
-	),
-	(
-		31,
-		pallet_referenda::TrackInfo {
-			name: "big_tipper",
+	},
+	pallet_referenda::Track {
+		id: 31,
+		info: pallet_referenda::TrackInfo {
+			name: s("big_tipper"),
 			max_deciding: 100,
 			decision_deposit: 10 * 3 * CENTS,
 			prepare_period: 4 * MINUTES,
@@ -233,11 +236,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_BIG_TIPPER,
 			min_support: SUP_BIG_TIPPER,
 		},
-	),
-	(
-		32,
-		pallet_referenda::TrackInfo {
-			name: "small_spender",
+	},
+	pallet_referenda::Track {
+		id: 32,
+		info: pallet_referenda::TrackInfo {
+			name: s("small_spender"),
 			max_deciding: 50,
 			decision_deposit: 100 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -247,11 +250,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_SMALL_SPENDER,
 			min_support: SUP_SMALL_SPENDER,
 		},
-	),
-	(
-		33,
-		pallet_referenda::TrackInfo {
-			name: "medium_spender",
+	},
+	pallet_referenda::Track {
+		id: 33,
+		info: pallet_referenda::TrackInfo {
+			name: s("medium_spender"),
 			max_deciding: 50,
 			decision_deposit: 200 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -261,11 +264,11 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_MEDIUM_SPENDER,
 			min_support: SUP_MEDIUM_SPENDER,
 		},
-	),
-	(
-		34,
-		pallet_referenda::TrackInfo {
-			name: "big_spender",
+	},
+	pallet_referenda::Track {
+		id: 34,
+		info: pallet_referenda::TrackInfo {
+			name: s("big_spender"),
 			max_deciding: 50,
 			decision_deposit: 400 * 3 * CENTS,
 			prepare_period: 10 * MINUTES,
@@ -275,15 +278,18 @@ const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15
 			min_approval: APP_BIG_SPENDER,
 			min_support: SUP_BIG_SPENDER,
 		},
-	),
+	},
 ];
 
 pub struct TracksInfo;
 impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 	type Id = u16;
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
-		&TRACKS_DATA[..]
+
+	fn tracks(
+	) -> impl Iterator<Item = Cow<'static, pallet_referenda::Track<Self::Id, Balance, BlockNumber>>>
+	{
+		TRACKS_DATA.iter().map(Cow::Borrowed)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 		if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) {
@@ -317,4 +323,3 @@ impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 		}
 	}
 }
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 86c313afa3b1b7774158112adb0377ca67184632..3ae5e8da843acad6771fd9ec418305cf9465c761 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -533,6 +533,7 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = Staking;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -770,7 +771,6 @@ impl pallet_staking::Config for Runtime {
 	type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig;
 	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = weights::pallet_staking::WeightInfo<Runtime>;
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxInvulnerables = frame_support::traits::ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
@@ -1872,6 +1872,10 @@ pub mod migrations {
 		parachains_scheduler::migration::MigrateV2ToV3<Runtime>,
 		pallet_staking::migrations::v16::MigrateV15ToV16<Runtime>,
 		pallet_staking::migrations::v17::MigrateV16ToV17<Runtime>,
+		pallet_session::migrations::v1::MigrateV0ToV1<
+			Runtime,
+			pallet_staking::migrations::v17::MigrateDisabledToSession<Runtime>,
+		>,
 		// permanent
 		pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	);
diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs
index f0491a1daf6c372bb34712572fc1fca3a9c0795d..add70e85fb49b02fa788e0ee6865d1c704326bcd 100644
--- a/polkadot/runtime/westend/src/weights/pallet_staking.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs
@@ -805,4 +805,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_utility.rs b/polkadot/runtime/westend/src/weights/pallet_utility.rs
index 84fa0589a58218da02846794b6ced503316c0096..a13c68545526dac2339d935aec284065fa023a92 100644
--- a/polkadot/runtime/westend/src/weights/pallet_utility.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_utility.rs
@@ -99,6 +99,11 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
 			// Standard Error: 2_817
 			.saturating_add(Weight::from_parts(5_113_539, 0).saturating_mul(c.into()))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Default::default()
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index 9f9d3c2321228d088ab12078dfec83d7c00e9bc6..ba916abecf053613f400d1fca40dbfced205b921 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -20,7 +20,7 @@ use crate::{CreateMatcher, MatchXcm};
 use core::{cell::Cell, marker::PhantomData, ops::ControlFlow, result::Result};
 use frame_support::{
 	ensure,
-	traits::{Contains, Get, ProcessMessageError},
+	traits::{Contains, ContainsPair, Get, Nothing, ProcessMessageError},
 };
 use polkadot_parachain_primitives::primitives::IsSystem;
 use xcm::prelude::*;
@@ -290,11 +290,25 @@ impl<T: Contains<Location>> ShouldExecute for AllowUnpaidExecutionFrom<T> {
 }
 
 /// Allows execution from any origin that is contained in `T` (i.e. `T::Contains(origin)`) if the
-/// message begins with the instruction `UnpaidExecution`.
+/// message explicitly includes the `UnpaidExecution` instruction.
 ///
 /// Use only for executions from trusted origin groups.
-pub struct AllowExplicitUnpaidExecutionFrom<T>(PhantomData<T>);
-impl<T: Contains<Location>> ShouldExecute for AllowExplicitUnpaidExecutionFrom<T> {
+///
+/// Allows for the message to receive teleports or reserve asset transfers and altering
+/// the origin before indicating `UnpaidExecution`.
+///
+/// Origin altering instructions are executed so the barrier can more accurately reject messages
+/// whose effective origin at the time of calling `UnpaidExecution` is not allowed.
+/// This means `T` will be checked against the actual origin _after_ being modified by prior
+/// instructions.
+///
+/// In order to execute the `AliasOrigin` instruction, the `Aliasers` type should be set to the same
+/// `Aliasers` item in the XCM configuration. If it isn't, then all messages with an `AliasOrigin`
+/// instruction will be rejected.
+pub struct AllowExplicitUnpaidExecutionFrom<T, Aliasers = Nothing>(PhantomData<(T, Aliasers)>);
+impl<T: Contains<Location>, Aliasers: ContainsPair<Location, Location>> ShouldExecute
+	for AllowExplicitUnpaidExecutionFrom<T, Aliasers>
+{
 	fn should_execute<Call>(
 		origin: &Location,
 		instructions: &mut [Instruction<Call>],
@@ -306,12 +320,69 @@ impl<T: Contains<Location>> ShouldExecute for AllowExplicitUnpaidExecutionFrom<T
 			"AllowExplicitUnpaidExecutionFrom origin: {:?}, instructions: {:?}, max_weight: {:?}, properties: {:?}",
 			origin, instructions, max_weight, _properties,
 		);
-		ensure!(T::contains(origin), ProcessMessageError::Unsupported);
-		instructions.matcher().match_next_inst(|inst| match inst {
-			UnpaidExecution { weight_limit: Limited(m), .. } if m.all_gte(max_weight) => Ok(()),
-			UnpaidExecution { weight_limit: Unlimited, .. } => Ok(()),
-			_ => Err(ProcessMessageError::Overweight(max_weight)),
-		})?;
+		// We will read up to 5 instructions before `UnpaidExecution`.
+		// This allows up to 3 asset transfer instructions, thus covering all possible transfer
+		// types, followed by a potential origin altering instruction, and a potential `SetHints`.
+		let mut actual_origin = origin.clone();
+		let processed = Cell::new(0usize);
+		let instructions_to_process = 5;
+		instructions
+			.matcher()
+			// We skip set hints and all types of asset transfer instructions.
+			.match_next_inst_while(
+				|inst| {
+					processed.get() < instructions_to_process &&
+						matches!(
+							inst,
+							ReceiveTeleportedAsset(_) |
+								ReserveAssetDeposited(_) | WithdrawAsset(_) |
+								SetHints { .. }
+						)
+				},
+				|_| {
+					processed.set(processed.get() + 1);
+					Ok(ControlFlow::Continue(()))
+				},
+			)?
+			// Then we go through all origin altering instructions and we
+			// alter the original origin.
+			.match_next_inst_while(
+				|_| processed.get() < instructions_to_process,
+				|inst| {
+					match inst {
+						ClearOrigin => {
+							// We don't support the `ClearOrigin` instruction since we always need
+							// to know the origin to know if it's allowed unpaid execution.
+							return Err(ProcessMessageError::Unsupported);
+						},
+						AliasOrigin(target) =>
+							if Aliasers::contains(&actual_origin, &target) {
+								actual_origin = target.clone();
+							} else {
+								return Err(ProcessMessageError::Unsupported);
+							},
+						DescendOrigin(child) if child != &Here => {
+							let Ok(_) = actual_origin.append_with(child.clone()) else {
+								return Err(ProcessMessageError::Unsupported);
+							};
+						},
+						_ => return Ok(ControlFlow::Break(())),
+					};
+					processed.set(processed.get() + 1);
+					Ok(ControlFlow::Continue(()))
+				},
+			)?
+			// We finally match on the required `UnpaidExecution` instruction.
+			.match_next_inst(|inst| match inst {
+				UnpaidExecution { weight_limit: Limited(m), .. } if m.all_gte(max_weight) => Ok(()),
+				UnpaidExecution { weight_limit: Unlimited, .. } => Ok(()),
+				_ => Err(ProcessMessageError::Overweight(max_weight)),
+			})?;
+
+		// After processing all the instructions, `actual_origin` was modified and we
+		// check if it's allowed to have unpaid execution.
+		ensure!(T::contains(&actual_origin), ProcessMessageError::Unsupported);
+
 		Ok(())
 	}
 }
diff --git a/polkadot/xcm/xcm-builder/src/matcher.rs b/polkadot/xcm/xcm-builder/src/matcher.rs
index ab515f180527da4a3ff0d4c68bf9ab4a337ac33f..2fed64bccde86c1132efe0887f7e920787826081 100644
--- a/polkadot/xcm/xcm-builder/src/matcher.rs
+++ b/polkadot/xcm/xcm-builder/src/matcher.rs
@@ -179,6 +179,20 @@ mod tests {
 	use std::{vec, vec::Vec};
 	use xcm::latest::prelude::*;
 
+	#[test]
+	fn match_next_inst_works() {
+		let test_cases: Vec<(Vec<Instruction<()>>, bool)> =
+			vec![(vec![ClearOrigin], true), (vec![Trap(0)], false)];
+
+		for (mut xcm, expected) in test_cases.into_iter() {
+			let result = xcm.matcher().match_next_inst(|inst| match inst {
+				ClearOrigin => Ok(()),
+				_ => Err(ProcessMessageError::Unsupported),
+			});
+			assert_eq!(result.is_ok(), expected);
+		}
+	}
+
 	#[test]
 	fn match_next_inst_while_works() {
 		let mut xcm: Vec<Instruction<()>> = vec![ClearOrigin];
diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs
index 2fb8e8ed0363b09794f7e4720f8eeab58994e0ff..6ece92f623ef407a2c975d7c1b532e5b62051c6e 100644
--- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs
@@ -147,9 +147,10 @@ fn allow_explicit_unpaid_should_work() {
 		TransferAsset { assets: (Parent, 100).into(), beneficiary: Here.into() },
 	]);
 
-	AllowExplicitUnpaidFrom::set(vec![Parent.into()]);
+	AllowExplicitUnpaidFrom::set(vec![Parent.into(), (Parent, Parachain(1000)).into()]);
+	type ExplicitUnpaidBarrier<T> = AllowExplicitUnpaidExecutionFrom<T, mock::Aliasers>;
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parachain(1).into(),
 		good_message.inner_mut(),
 		Weight::from_parts(20, 20),
@@ -157,7 +158,7 @@ fn allow_explicit_unpaid_should_work() {
 	);
 	assert_eq!(r, Err(ProcessMessageError::Unsupported));
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parent.into(),
 		bad_message1.inner_mut(),
 		Weight::from_parts(20, 20),
@@ -165,7 +166,7 @@ fn allow_explicit_unpaid_should_work() {
 	);
 	assert_eq!(r, Err(ProcessMessageError::Overweight(Weight::from_parts(20, 20))));
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parent.into(),
 		bad_message2.inner_mut(),
 		Weight::from_parts(20, 20),
@@ -173,7 +174,7 @@ fn allow_explicit_unpaid_should_work() {
 	);
 	assert_eq!(r, Err(ProcessMessageError::Overweight(Weight::from_parts(20, 20))));
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parent.into(),
 		good_message.inner_mut(),
 		Weight::from_parts(20, 20),
@@ -189,7 +190,7 @@ fn allow_explicit_unpaid_should_work() {
 		TransferAsset { assets: (Parent, 100).into(), beneficiary: Here.into() },
 	]);
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parent.into(),
 		message_with_different_weight_parts.inner_mut(),
 		Weight::from_parts(20, 20),
@@ -197,13 +198,372 @@ fn allow_explicit_unpaid_should_work() {
 	);
 	assert_eq!(r, Err(ProcessMessageError::Overweight(Weight::from_parts(20, 20))));
 
-	let r = AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+	let r = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
 		&Parent.into(),
 		message_with_different_weight_parts.inner_mut(),
 		Weight::from_parts(10, 10),
 		&mut props(Weight::zero()),
 	);
 	assert_eq!(r, Ok(()));
+
+	// Invalid since location to alias is not allowed.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.alias_origin(Parachain(1000))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
+
+	// Valid because all parachains are children of the relay chain.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Valid.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Invalid because `ClearOrigin` clears origin and `UnpaidExecution`
+	// can't know if there are enough permissions.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.clear_origin()
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
+
+	// Valid.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited((Parent, 100u128))
+		.descend_origin(Parachain(1000))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(40, 40),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Invalid because of `ClearOrigin`.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.clear_origin()
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
+
+	// Invalid because there is no `UnpaidExecution`.
+	let mut message = Xcm::<()>::builder_unsafe()
+		.receive_teleported_asset((Here, 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(30, 30),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::BadFormat));
+
+	// Invalid because even though alias is valid, it can't use `UnpaidExecution`.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, AccountId32 { id: [128u8; 32], network: None }))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(60, 60),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
+
+	// Invalid because `UnpaidExecution` specifies less weight than needed.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Limited(Weight::from_parts(50, 50)), None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(60, 60),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Overweight(Weight::from_parts(60, 60))));
+
+	// Invalid because of too many instructions before `UnpaidExecution`.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, AccountId32 { id: [128u8; 32], network: None }))
+		.unpaid_execution(Limited(Weight::from_parts(50, 50)), None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(70, 70),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Overweight(Weight::from_parts(70, 70))));
+
+	// Valid.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(60, 60),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Valid.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.descend_origin(Parachain(1000))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = ExplicitUnpaidBarrier::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(60, 60),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+}
+
+#[test]
+fn allow_explicit_unpaid_fails_with_alias_origin_if_no_aliasers() {
+	AllowExplicitUnpaidFrom::set(vec![(Parent, Parachain(1000)).into()]);
+
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut good_message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.descend_origin(Parachain(1000))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result =
+		AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+			&Parent.into(),
+			good_message.inner_mut(),
+			Weight::from_parts(100, 100),
+			&mut props(Weight::zero()),
+		);
+	assert_eq!(result, Ok(()));
+
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut bad_message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	// Barrier has `Aliasers` set as `Nothing` by default, rejecting message if it
+	// has an `AliasOrigin` instruction.
+	let result =
+		AllowExplicitUnpaidExecutionFrom::<IsInVec<AllowExplicitUnpaidFrom>>::should_execute(
+			&Parent.into(),
+			bad_message.inner_mut(),
+			Weight::from_parts(100, 100),
+			&mut props(Weight::zero()),
+		);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
+}
+
+#[test]
+fn allow_explicit_unpaid_with_computed_origin() {
+	AllowExplicitUnpaidFrom::set(vec![
+		(Parent, Parachain(1000)).into(),
+		(Parent, Parent, GlobalConsensus(Polkadot), Parachain(1000)).into(),
+	]);
+	type ExplicitUnpaidBarrier<T> = AllowExplicitUnpaidExecutionFrom<T, mock::Aliasers>;
+
+	// Message that passes without `WithComputedOrigin` should also pass with it.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = WithComputedOrigin::<
+		ExplicitUnpaidBarrier<IsInVec<AllowExplicitUnpaidFrom>>,
+		ExecutorUniversalLocation,
+		ConstU32<2>,
+	>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(100, 100),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Can manipulate origin before the inner barrier.
+	// For example, to act as another network.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.universal_origin(Polkadot)
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parent, GlobalConsensus(Polkadot), Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = WithComputedOrigin::<
+		ExplicitUnpaidBarrier<IsInVec<AllowExplicitUnpaidFrom>>,
+		ExecutorUniversalLocation,
+		ConstU32<2>,
+	>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(100, 100),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Ok(()));
+
+	// Any invalid conversions from the new origin fail.
+	let assets: Vec<Asset> = vec![
+		(Parent, 100u128).into(),
+		((Parent, PalletInstance(10), GeneralIndex(1000)), 100u128).into(),
+	];
+	let mut message = Xcm::<()>::builder_unsafe()
+		.universal_origin(Polkadot)
+		.set_hints(vec![AssetClaimer {
+			location: AccountId32 { id: [100u8; 32], network: None }.into(),
+		}])
+		.receive_teleported_asset((Here, 100u128))
+		.reserve_asset_deposited(assets)
+		.withdraw_asset((GeneralIndex(1), 100u128))
+		.alias_origin((Parent, Parachain(1000)))
+		.unpaid_execution(Unlimited, None)
+		.build();
+	let result = WithComputedOrigin::<
+		ExplicitUnpaidBarrier<IsInVec<AllowExplicitUnpaidFrom>>,
+		ExecutorUniversalLocation,
+		ConstU32<2>,
+	>::should_execute(
+		&Parent.into(),
+		message.inner_mut(),
+		Weight::from_parts(100, 100),
+		&mut props(Weight::zero()),
+	);
+	assert_eq!(result, Err(ProcessMessageError::Unsupported));
 }
 
 #[test]
diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs
index 127888104a4ad77b0272d8e230f724d3e1cca7ac..2df6bdfe15062324221e66dc8577f2daad61b1f2 100644
--- a/polkadot/xcm/xcm-builder/src/tests/mock.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs
@@ -22,9 +22,9 @@ use crate::{
 	EnsureDecodableXcm,
 };
 pub use crate::{
-	AliasForeignAccountId32, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
-	AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, FixedRateOfFungible,
-	FixedWeightBounds, TakeWeightCredit,
+	AliasChildLocation, AliasForeignAccountId32, AllowExplicitUnpaidExecutionFrom,
+	AllowKnownQueryResponses, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom,
+	FixedRateOfFungible, FixedWeightBounds, TakeWeightCredit,
 };
 pub use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
 pub use codec::{Decode, Encode};
@@ -733,6 +733,9 @@ impl Contains<Location> for ParentPrefix {
 	}
 }
 
+/// Pairs (location1, location2) where location1 can alias as location2.
+pub type Aliasers = (AliasForeignAccountId32<SiblingPrefix>, AliasChildLocation);
+
 pub struct TestConfig;
 impl Config for TestConfig {
 	type RuntimeCall = TestCall;
@@ -758,7 +761,7 @@ impl Config for TestConfig {
 	type MessageExporter = TestMessageExporter;
 	type CallDispatcher = TestCall;
 	type SafeCallFilter = Everything;
-	type Aliasers = AliasForeignAccountId32<SiblingPrefix>;
+	type Aliasers = Aliasers;
 	type TransactionalProcessor = ();
 	type HrmpNewChannelOpenRequestHandler = ();
 	type HrmpChannelAcceptedHandler = ();
diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs
index d0f18aea1ab318e223859cd3ba106cec4c29775d..e2becdbdcd386c2245ca074af8d10610681acd61 100644
--- a/polkadot/xcm/xcm-executor/src/lib.rs
+++ b/polkadot/xcm/xcm-executor/src/lib.rs
@@ -1194,7 +1194,7 @@ impl<Config: config::Config> XcmExecutor<Config> {
 					// transferring the other assets. This is required to satisfy the
 					// `MAX_ASSETS_FOR_BUY_EXECUTION` limit in the `AllowTopLevelPaidExecutionFrom`
 					// barrier.
-					if let Some(remote_fees) = remote_fees {
+					let remote_fees_paid = if let Some(remote_fees) = remote_fees {
 						let reanchored_fees = match remote_fees {
 							AssetTransferFilter::Teleport(fees_filter) => {
 								let teleport_fees = self
@@ -1239,11 +1239,10 @@ impl<Config: config::Config> XcmExecutor<Config> {
 						// move these assets to the fees register for covering execution and paying
 						// any subsequent fees
 						message.push(PayFees { asset: fees });
+						true
 					} else {
-						// unpaid execution
-						message
-							.push(UnpaidExecution { weight_limit: Unlimited, check_origin: None });
-					}
+						false
+					};
 
 					// add any extra asset transfers
 					for asset_filter in assets {
@@ -1270,23 +1269,36 @@ impl<Config: config::Config> XcmExecutor<Config> {
 								)?,
 						};
 					}
+
 					if preserve_origin {
-						// preserve current origin for subsequent user-controlled instructions on
-						// remote chain
-						let original_origin = self
+						// We alias the origin if it's not a noop (origin != `Here`).
+						if let Some(original_origin) = self
 							.origin_ref()
+							.filter(|origin| *origin != &Location::here())
 							.cloned()
-							.and_then(|origin| {
-								Self::try_reanchor(origin, &destination)
-									.map(|(reanchored, _)| reanchored)
-									.ok()
-							})
-							.ok_or(XcmError::BadOrigin)?;
-						message.push(AliasOrigin(original_origin));
+						{
+							// preserve current origin for subsequent user-controlled instructions on
+							// remote chain
+							let reanchored_origin = Self::try_reanchor(original_origin, &destination)?.0;
+							message.push(AliasOrigin(reanchored_origin));
+						}
 					} else {
 						// clear origin for subsequent user-controlled instructions on remote chain
 						message.push(ClearOrigin);
 					}
+
+					// If not intending to pay for fees then we append the `UnpaidExecution`
+					// _AFTER_ origin altering instructions.
+					// When origin is not preserved, it's probably going to fail on the receiver.
+					if !remote_fees_paid {
+						// We push the UnpaidExecution instruction to notify we do not intend to pay
+						// for fees.
+						// The receiving chain must decide based on the origin of the message if they
+						// accept this.
+						message
+							.push(UnpaidExecution { weight_limit: Unlimited, check_origin: None });
+					}
+
 					// append custom instructions
 					message.extend(remote_xcm.0.into_iter());
 					// send the onward XCM
diff --git a/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs
index 09ed1f44cc4af4b34b570c5e00aad7bf2770883d..8786186006be9780d2e1f863b122a0ea6aa66643 100644
--- a/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs
+++ b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs
@@ -20,6 +20,7 @@
 //! [Fellowship RFC 122](https://github.com/polkadot-fellows/rfCs/pull/122), and the
 //! [specification](https://github.com/polkadot-fellows/xcm-format) for more information.
 
+use codec::Encode;
 use xcm::{latest::AssetTransferFilter, prelude::*};
 
 use super::mock::*;
@@ -104,3 +105,115 @@ fn preserves_origin() {
 	assert!(matches!(instr.next().unwrap(), RefundSurplus));
 	assert!(matches!(instr.next().unwrap(), DepositAsset { .. }));
 }
+
+#[test]
+fn unpaid_execution_goes_after_origin_alteration() {
+	// Make sure the sender has enough funds to withdraw.
+	add_asset(SENDER, (Here, 100u128));
+
+	let xcm_on_destination =
+		Xcm::builder_unsafe().refund_surplus().deposit_asset(All, RECIPIENT).build();
+	let asset: Asset = (Here, 90u128).into();
+	let xcm = Xcm::builder()
+		.withdraw_asset((Here, 100u128))
+		.pay_fees((Here, 10u128))
+		.initiate_transfer(
+			Parent,
+			None, // We specify no remote fees.
+			true, // Preserve origin, necessary for `UnpaidExecution`.
+			vec![AssetTransferFilter::ReserveDeposit(asset.into())],
+			xcm_on_destination,
+		)
+		.build();
+
+	// We initialize the executor with the SENDER origin, which is not waived.
+	let (mut vm, _) = instantiate_executor(SENDER, xcm.clone());
+
+	// Program executes successfully.
+	let result = vm.bench_process(xcm);
+	assert!(result.is_ok(), "execution error {:?}", result);
+
+	let (destination, sent_message) = sent_xcm().pop().unwrap();
+	assert_eq!(destination, Parent.into());
+	assert_eq!(sent_message.len(), 5);
+	let mut instructions = sent_message.inner().iter();
+	assert!(matches!(instructions.next().unwrap(), ReserveAssetDeposited(..)));
+	assert!(matches!(
+		instructions.next().unwrap(),
+		AliasOrigin(origin) if matches!(origin.unpack(), (0, [Parachain(1000), AccountId32 { id: SENDER, network: None }]))
+	));
+	assert!(matches!(instructions.next().unwrap(), UnpaidExecution { .. }));
+	assert!(matches!(instructions.next().unwrap(), RefundSurplus));
+	assert!(matches!(instructions.next().unwrap(), DepositAsset { .. }));
+}
+
+#[test]
+fn no_alias_origin_if_root() {
+	// Make sure the sender has enough funds to withdraw.
+	add_asset(Here, (Here, 100u128));
+
+	let xcm_on_destination =
+		Xcm::builder_unsafe().refund_surplus().deposit_asset(All, RECIPIENT).build();
+	let asset: Asset = (Here, 90u128).into();
+	let xcm = Xcm::builder()
+		.withdraw_asset((Here, 100u128))
+		.pay_fees((Here, 10u128))
+		.initiate_transfer(
+			Parent,
+			None, // We specify no remote fees.
+			true, // Preserve origin, necessary for `UnpaidExecution`.
+			vec![AssetTransferFilter::ReserveDeposit(asset.into())],
+			xcm_on_destination,
+		)
+		.build();
+
+	// We initialize the executor with the root origin (`Here`), which is waived.
+	let (mut vm, _) = instantiate_executor(Here, xcm.clone());
+
+	// Program executes successfully.
+	let result = vm.bench_process(xcm);
+	assert!(result.is_ok(), "execution error {:?}", result);
+
+	let (destination, sent_message) = sent_xcm().pop().unwrap();
+	assert_eq!(destination, Parent.into());
+	assert_eq!(sent_message.len(), 4);
+	let mut instructions = sent_message.inner().iter();
+	assert!(matches!(instructions.next().unwrap(), ReserveAssetDeposited(..)));
+	assert!(matches!(instructions.next().unwrap(), UnpaidExecution { .. }));
+	assert!(matches!(instructions.next().unwrap(), RefundSurplus));
+	assert!(matches!(instructions.next().unwrap(), DepositAsset { .. }));
+}
+
+// We simulate going from one system parachain to another without
+// having to pay remote fees.
+#[test]
+fn unpaid_transact() {
+	let to_another_system_para: Location = (Parent, Parachain(1001)).into();
+	// We want to execute some call in the receiving chain.
+	let xcm_on_destination = Xcm::builder_unsafe()
+		.transact(OriginKind::Superuser, None, b"".encode())
+		.build();
+	let xcm = Xcm::builder_unsafe()
+		.initiate_transfer(
+			to_another_system_para.clone(),
+			None,   // We specify no remote fees.
+			true,   // Preserve origin, necessary for `UnpaidExecution`.
+			vec![], // No need for assets.
+			xcm_on_destination,
+		)
+		.build();
+
+	// We initialize the executor with the root origin, which is waived.
+	let (mut vm, _) = instantiate_executor(Here, xcm.clone());
+
+	// Program executes successfully.
+	let result = vm.bench_process(xcm.clone());
+	assert!(result.is_ok(), "execution error: {:?}", result);
+
+	let (destination, sent_message) = sent_xcm().pop().unwrap();
+	assert_eq!(destination, to_another_system_para);
+	assert_eq!(sent_message.len(), 2);
+	let mut instructions = sent_message.inner().iter();
+	assert!(matches!(instructions.next().unwrap(), UnpaidExecution { .. }));
+	assert!(matches!(instructions.next().unwrap(), Transact { .. }));
+}
diff --git a/polkadot/xcm/xcm-executor/src/tests/mock.rs b/polkadot/xcm/xcm-executor/src/tests/mock.rs
index 9cf258331f38a4ff65558c3441b8d3dddbdca638..c0bcfe88d2baa8aff42142c368b52f035a30b552 100644
--- a/polkadot/xcm/xcm-executor/src/tests/mock.rs
+++ b/polkadot/xcm/xcm-executor/src/tests/mock.rs
@@ -29,8 +29,11 @@ use sp_runtime::traits::Dispatchable;
 use xcm::prelude::*;
 
 use crate::{
-	traits::{DropAssets, Properties, ShouldExecute, TransactAsset, WeightBounds, WeightTrader},
-	AssetsInHolding, Config, XcmExecutor,
+	traits::{
+		DropAssets, FeeManager, Properties, ShouldExecute, TransactAsset, WeightBounds,
+		WeightTrader,
+	},
+	AssetsInHolding, Config, FeeReason, XcmExecutor,
 };
 
 /// We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we
@@ -244,6 +247,26 @@ pub fn sent_xcm() -> Vec<(Location, Xcm<()>)> {
 	SENT_XCM.with(|q| (*q.borrow()).clone())
 }
 
+/// A mock contract address that doesn't need to pay for fees.
+pub const WAIVED_CONTRACT_ADDRESS: [u8; 20] = [128; 20];
+
+/// Test fee manager that will waive the fee for some origins.
+///
+/// Doesn't do anything with the fee, which effectively burns it.
+pub struct TestFeeManager;
+impl FeeManager for TestFeeManager {
+	fn is_waived(origin: Option<&Location>, _: FeeReason) -> bool {
+		let Some(origin) = origin else { return false };
+		// Match the root origin and a particular smart contract account.
+		matches!(
+			origin.unpack(),
+			(0, []) | (0, [AccountKey20 { network: None, key: WAIVED_CONTRACT_ADDRESS }])
+		)
+	}
+
+	fn handle_fee(_: Assets, _: Option<&XcmContext>, _: FeeReason) {}
+}
+
 /// Test XcmConfig that uses all the test implementations in this file.
 pub struct XcmConfig;
 impl Config for XcmConfig {
@@ -265,7 +288,7 @@ impl Config for XcmConfig {
 	type SubscriptionService = ();
 	type PalletInstancesInfo = ();
 	type MaxAssetsIntoHolding = MaxAssetsIntoHolding;
-	type FeeManager = ();
+	type FeeManager = TestFeeManager;
 	type MessageExporter = ();
 	type UniversalAliases = Nothing;
 	type CallDispatcher = Self::RuntimeCall;
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
index fc650ae55a785b08ab609feb07337d099231904c..38530fd3f5aa5c7346b4158593ffd72df1e13b39 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
@@ -160,13 +160,13 @@ pub mod mock_msg_queue {
 		type XcmExecutor: ExecuteXcm<Self::RuntimeCall>;
 	}
 
-	#[pallet::call]
-	impl<T: Config> Pallet<T> {}
-
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
 	pub struct Pallet<T>(_);
 
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {}
+
 	#[pallet::storage]
 	#[pallet::getter(fn parachain_id)]
 	pub(super) type ParachainId<T: Config> = StorageValue<_, ParaId, ValueQuery>;
diff --git a/prdoc/pr_2072.prdoc b/prdoc/pr_2072.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..fdc1a373938d2be855daaa98077c09b8e23bffe6
--- /dev/null
+++ b/prdoc/pr_2072.prdoc
@@ -0,0 +1,23 @@
+title: "Return iterator in pallet_referenda::TracksInfo::tracks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Change the return type of the trait method `pallet_referenda::TracksInfo::tracks` to return an 
+      iterator of `Cow<'static, Tracks<_, _, _>>` instead of a static slice in order to support more 
+      flexible implementations that can define referenda tracks dynamically.
+  - audience: Runtime User
+    description: |
+      There is a change in `pallet-referenda`. Now, the tracks are retrieved as a list of `Track`s. Also, the names of
+      the tracks might have some trailing null values (`\0`). This means the display representation of the tracks' names
+      must be sanitized.
+
+crates:
+  - name: pallet-referenda
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: rococo-runtime
+    bump: major
+  - name: collectives-westend-runtime
+    bump: major
\ No newline at end of file
diff --git a/prdoc/pr_6440.prdoc b/prdoc/pr_6440.prdoc
index 376e59fa752eb9870e7d6078caf8ee4310a2f0c8..406050bbf6a50a869e2d6b0daebd7a86ff5842dd 100644
--- a/prdoc/pr_6440.prdoc
+++ b/prdoc/pr_6440.prdoc
@@ -6,3 +6,4 @@ doc:
 crates:
 - name: polkadot-node-core-pvf
   validate: false
+  bump: none
diff --git a/prdoc/pr_6455.prdoc b/prdoc/pr_6455.prdoc
index 9a83048e2fd292cdd71e3504bbfd4f94a4e3dfa3..d998473133569f87b1a7d4c80226863416ccfbb0 100644
--- a/prdoc/pr_6455.prdoc
+++ b/prdoc/pr_6455.prdoc
@@ -6,3 +6,4 @@ doc:
 crates:
 - name: sc-network
   validate: false
+  bump: none
diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc
deleted file mode 100644
index 61a64c72418576b6e35b7ae235e2690fa28b8ccf..0000000000000000000000000000000000000000
--- a/prdoc/pr_6549.prdoc
+++ /dev/null
@@ -1,247 +0,0 @@
-doc: []
-
-crates:
-  - name: polkadot-sdk
-    bump: none
-  - name: asset-test-utils
-    bump: none
-  - name: cumulus-pallet-parachain-system
-    bump: none
-  - name: cumulus-pallet-parachain-system-proc-macro
-    bump: none
-  - name: cumulus-primitives-core
-    bump: none
-  - name: polkadot-core-primitives
-    bump: none
-  - name: polkadot-parachain-primitives
-    bump: none
-  - name: polkadot-primitives
-    bump: none
-  - name: staging-xcm
-    bump: none
-  - name: xcm-procedural
-    bump: none
-  - name: cumulus-primitives-parachain-inherent
-    bump: none
-  - name: cumulus-primitives-proof-size-hostfunction
-    bump: none
-  - name: polkadot-runtime-common
-    bump: none
-  - name: polkadot-runtime-parachains
-    bump: none
-  - name: polkadot-runtime-metrics
-    bump: none
-  - name: staging-xcm-executor
-    bump: none
-  - name: slot-range-helper
-    bump: none
-  - name: staging-xcm-builder
-    bump: none
-  - name: pallet-xcm
-    bump: none
-  - name: cumulus-primitives-storage-weight-reclaim
-    bump: none
-  - name: cumulus-pallet-aura-ext
-    bump: none
-  - name: cumulus-primitives-aura
-    bump: none
-  - name: staging-parachain-info
-    bump: none
-  - name: cumulus-test-relay-sproof-builder
-    bump: none
-  - name: cumulus-client-cli
-    bump: none
-  - name: cumulus-client-collator
-    bump: none
-  - name: cumulus-client-consensus-common
-    bump: none
-  - name: cumulus-client-pov-recovery
-    bump: none
-  - name: cumulus-relay-chain-interface
-    bump: none
-  - name: polkadot-overseer
-    bump: none
-  - name: tracing-gum
-    bump: none
-  - name: tracing-gum-proc-macro
-    bump: none
-  - name: polkadot-node-metrics
-    bump: none
-  - name: polkadot-node-primitives
-    bump: none
-  - name: polkadot-erasure-coding
-    bump: none
-  - name: polkadot-node-subsystem
-    bump: none
-  - name: polkadot-node-subsystem-types
-    bump: none
-  - name: polkadot-node-network-protocol
-    bump: none
-  - name: polkadot-statement-table
-    bump: none
-  - name: polkadot-rpc
-    bump: none
-  - name: polkadot-service
-    bump: none
-  - name: cumulus-client-parachain-inherent
-    bump: none
-  - name: westend-runtime
-    bump: none
-  - name: pallet-xcm-benchmarks
-    bump: none
-  - name: westend-runtime-constants
-    bump: none
-  - name: polkadot-approval-distribution
-    bump: none
-  - name: polkadot-node-subsystem-util
-    bump: none
-  - name: polkadot-availability-bitfield-distribution
-    bump: none
-  - name: polkadot-availability-distribution
-    bump: none
-  - name: polkadot-availability-recovery
-    bump: none
-  - name: polkadot-node-core-approval-voting
-    bump: none
-  - name: polkadot-node-core-approval-voting-parallel
-    bump: none
-  - name: polkadot-node-core-av-store
-    bump: none
-  - name: polkadot-node-core-chain-api
-    bump: none
-  - name: polkadot-statement-distribution
-    bump: none
-  - name: polkadot-collator-protocol
-    bump: none
-  - name: polkadot-dispute-distribution
-    bump: none
-  - name: polkadot-gossip-support
-    bump: none
-  - name: polkadot-network-bridge
-    bump: none
-  - name: polkadot-node-collation-generation
-    bump: none
-  - name: polkadot-node-core-backing
-    bump: none
-  - name: polkadot-node-core-bitfield-signing
-    bump: none
-  - name: polkadot-node-core-candidate-validation
-    bump: none
-  - name: polkadot-node-core-pvf
-    bump: none
-  - name: polkadot-node-core-pvf-common
-    bump: none
-  - name: polkadot-node-core-pvf-execute-worker
-    bump: none
-  - name: polkadot-node-core-pvf-prepare-worker
-    bump: none
-  - name: staging-tracking-allocator
-    bump: none
-  - name: rococo-runtime
-    bump: none
-  - name: rococo-runtime-constants
-    bump: none
-  - name: polkadot-node-core-chain-selection
-    bump: none
-  - name: polkadot-node-core-dispute-coordinator
-    bump: none
-  - name: polkadot-node-core-parachains-inherent
-    bump: none
-  - name: polkadot-node-core-prospective-parachains
-    bump: none
-  - name: polkadot-node-core-provisioner
-    bump: none
-  - name: polkadot-node-core-pvf-checker
-    bump: none
-  - name: polkadot-node-core-runtime-api
-    bump: none
-  - name: cumulus-client-network
-    bump: none
-  - name: cumulus-relay-chain-inprocess-interface
-    bump: none
-  - name: polkadot-cli
-    bump: none
-  - name: cumulus-client-consensus-aura
-    bump: none
-  - name: cumulus-client-consensus-proposer
-    bump: none
-  - name: cumulus-client-consensus-relay-chain
-    bump: none
-  - name: cumulus-client-service
-    bump: none
-  - name: cumulus-relay-chain-minimal-node
-    bump: none
-  - name: cumulus-relay-chain-rpc-interface
-    bump: none
-  - name: parachains-common
-    bump: none
-  - name: cumulus-primitives-utility
-    bump: none
-  - name: cumulus-pallet-xcmp-queue
-    bump: none
-  - name: parachains-runtimes-test-utils
-    bump: none
-  - name: assets-common
-    bump: none
-  - name: bridge-hub-common
-    bump: none
-  - name: bridge-hub-test-utils
-    bump: none
-  - name: cumulus-pallet-solo-to-para
-    bump: none
-  - name: cumulus-pallet-xcm
-    bump: none
-  - name: cumulus-ping
-    bump: none
-  - name: cumulus-primitives-timestamp
-    bump: none
-  - name: emulated-integration-tests-common
-    bump: none
-  - name: xcm-emulator
-    bump: none
-  - name: pallet-collective-content
-    bump: none
-  - name: xcm-simulator
-    bump: none
-  - name: pallet-revive-fixtures
-    bump: none
-  - name: polkadot-omni-node-lib
-    bump: none
-  - name: snowbridge-runtime-test-common
-    bump: none
-  - name: testnet-parachains-constants
-    bump: none
-  - name: asset-hub-rococo-runtime
-    bump: none
-  - name: asset-hub-westend-runtime
-    bump: none
-  - name: bridge-hub-rococo-runtime
-    bump: none
-  - name: bridge-hub-westend-runtime
-    bump: none
-  - name: collectives-westend-runtime
-    bump: none
-  - name: coretime-rococo-runtime
-    bump: none
-  - name: coretime-westend-runtime
-    bump: none
-  - name: people-rococo-runtime
-    bump: none
-  - name: people-westend-runtime
-    bump: none
-  - name: contracts-rococo-runtime
-    bump: none
-  - name: glutton-westend-runtime
-    bump: none
-  - name: rococo-parachain-runtime
-    bump: none
-  - name: polkadot-omni-node
-    bump: none
-  - name: polkadot-parachain-bin
-    bump: none
-  - name: polkadot
-    bump: none
-  - name: polkadot-voter-bags
-    bump: none
-  - name: xcm-simulator-example
-    bump: none
diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc
index 1db5fd54d97168675802b570685a0c92610ccb8c..159685f5a5ce49bd22c94bb89f1b6db59cf9ff01 100644
--- a/prdoc/pr_6636.prdoc
+++ b/prdoc/pr_6636.prdoc
@@ -7,3 +7,4 @@ doc:
 crates:
 - name: sc-network
   validate: false
+  bump: none
diff --git a/prdoc/pr_6988.prdoc b/prdoc/pr_6988.prdoc
deleted file mode 100644
index 18f70f9fd97f1f316bec59a8072e89a8acec1c8b..0000000000000000000000000000000000000000
--- a/prdoc/pr_6988.prdoc
+++ /dev/null
@@ -1,5 +0,0 @@
-doc: []
-
-crates:
-  - name: polkadot
-    bump: none
\ No newline at end of file
diff --git a/prdoc/pr_7014.prdoc b/prdoc/pr_7014.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e4e0214480a3ed420d402d21395f5e9eb50505f9
--- /dev/null
+++ b/prdoc/pr_7014.prdoc
@@ -0,0 +1,24 @@
+title: Remove `yamux_window_size` from network config
+doc:
+- audience: Node Dev
+  description: |-
+    # Description
+
+    resolve #6468
+
+
+
+    # Checklist
+
+    * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above.
+    * [ ] My PR follows the [labeling requirements](
+    https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process
+    ) of this project (at minimum one label for `T` required)
+        * External contributors: ask maintainers to put the right label on your PR.
+    * [ ] I have made corresponding changes to the documentation (if applicable)
+    * [ ] I have added tests that prove my fix is effective or that my feature works (if applicable)
+crates:
+- name: sc-cli
+  bump: major
+- name: sc-network
+  bump: major
diff --git a/prdoc/pr_7407.prdoc b/prdoc/pr_7407.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e99e41769184807bae37bd44ee6826d683cb7b98
--- /dev/null
+++ b/prdoc/pr_7407.prdoc
@@ -0,0 +1,40 @@
+title: 'Fixes #219'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Add a new extrinsic `dispatch_as_fallible`.
+
+    It's almost the same as `dispatch_as`, but it checks the result of the call.
+
+    Closes #219.
+
+    It also adds more unit tests to cover `dispatch_as` and `dispatch_as_fallible`.
+
+    ---
+
+    Polkadot address: 156HGo9setPcU2qhFMVWLkcmtCEGySLwNqa3DaEiYSWtte4Y
+crates:
+- name: asset-hub-rococo-runtime
+  bump: minor
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: bridge-hub-rococo-runtime
+  bump: minor
+- name: bridge-hub-westend-runtime
+  bump: minor
+- name: collectives-westend-runtime
+  bump: minor
+- name: coretime-rococo-runtime
+  bump: minor
+- name: coretime-westend-runtime
+  bump: minor
+- name: people-rococo-runtime
+  bump: minor
+- name: people-westend-runtime
+  bump: minor
+- name: rococo-runtime
+  bump: minor
+- name: westend-runtime
+  bump: minor
+- name: pallet-utility
+  bump: minor
diff --git a/prdoc/pr_7423.prdoc b/prdoc/pr_7423.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..30e22a7ccf8665db07dcb6cdd72062fec46da6d9
--- /dev/null
+++ b/prdoc/pr_7423.prdoc
@@ -0,0 +1,17 @@
+title: Fix issue with InitiateTransfer and UnpaidExecution
+doc:
+- audience: Runtime Dev
+  description: |-
+    Fix issue where setting the `remote_fees` field of `InitiateTransfer` to `None` could lead to unintended bypassing of fees in certain conditions.
+    `UnpaidExecution` is now appended **after** origin alteration.
+    If planning to use `UnpaidExecution`, you need to set `preserve_origin = true`.
+
+    The `AllowExplicitUnpaidExecutionFrom` barrier now allows instructions for receiving funds, followed by origin-altering instructions,
+    before the actual `UnpaidExecution`.
+    It takes a new generic, `Aliasers`, needed for executing `AliasOrigin` to see if the effective origin is allowed to use `UnpaidExecution`.
+    This should be set to the same value as `Aliasers` in the XCM configuration.
+crates:
+- name: staging-xcm-builder
+  bump: patch
+- name: staging-xcm-executor
+  bump: patch
diff --git a/prdoc/pr_7424.prdoc b/prdoc/pr_7424.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e177f41371bc6492ad1aeb239e120b5b556df5d7
--- /dev/null
+++ b/prdoc/pr_7424.prdoc
@@ -0,0 +1,37 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: 'Bounded Slashing: Paginated Offence Processing & Slash Application'
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR refactors the slashing mechanism in `pallet-staking` to be bounded by introducing paged offence processing and paged slash application.
+
+      ### Key Changes
+      - Offences are queued instead of being processed immediately.
+      - Slashes are computed in pages, stored as a `StorageDoubleMap` with `(Validator, SlashFraction, PageIndex)` to uniquely identify them.
+      - Slashes are applied incrementally across multiple blocks instead of a single unbounded operation.
+      - New storage items: `OffenceQueue`, `ProcessingOffence`, `OffenceQueueEras`.
+      - Updated API for cancelling and applying slashes.
+      - Preliminary benchmarks added; further optimizations planned.
+
+      This enables staking slashing to scale efficiently and removes a major blocker for staking migration to a parachain (AH).
+
+crates:
+- name: pallet-babe
+  bump: patch
+- name: pallet-staking
+  bump: major
+- name: pallet-grandpa
+  bump: patch
+- name: westend-runtime
+  bump: minor
+- name: pallet-beefy
+  bump: patch
+- name: pallet-offences-benchmarking
+  bump: patch
+- name: pallet-session-benchmarking
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
\ No newline at end of file
diff --git a/prdoc/pr_7494.prdoc b/prdoc/pr_7494.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b0e1ec0266552e44b74ccadd9fd4b8f375751036
--- /dev/null
+++ b/prdoc/pr_7494.prdoc
@@ -0,0 +1,18 @@
+title: Enhance libp2p logging targets for granular control
+
+doc:
+  - audience: [Node Dev, Node Operator]
+    description: |
+      This PR modifies the libp2p networking-specific log targets for granular control (e.g., just enabling trace for req-resp).
+
+      Previously, all logs were outputted to `sub-libp2p` target, flooding the log messages on busy validators.
+        - Discovery: `sub-libp2p::discovery`;
+        - Notification/behaviour: `sub-libp2p::notification::behaviour`;
+        - Notification/handler: `sub-libp2p::notification::handler`;
+        - Notification/service: `sub-libp2p::notification::service`;
+        - Notification/upgrade: `sub-libp2p::notification::upgrade`;
+        - Request response: `sub-libp2p::request-response`.
+
+crates:
+  - name: sc-network
+    bump: patch
diff --git a/prdoc/pr_7563.prdoc b/prdoc/pr_7563.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a24e4a3d83b90c240b4825b9db09618f57b92bed
--- /dev/null
+++ b/prdoc/pr_7563.prdoc
@@ -0,0 +1,14 @@
+title: Bump frame-metadata v16 to 19.0.0
+doc:
+- audience: Runtime Dev
+  description: Update to latest version of `frame-metadata` and `merkleized-metadata` in order to support pallet
+    view function metadata.
+crates:
+- name: sp-metadata-ir
+  bump: minor
+- name: frame-support
+  bump: none
+- name: substrate-wasm-builder
+  bump: minor
+- name: pallet-example-view-functions
+  bump: minor
diff --git a/prdoc/pr_7579.prdoc b/prdoc/pr_7579.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..3f7cbda6492935f3c92cffbbbead8575b172831f
--- /dev/null
+++ b/prdoc/pr_7579.prdoc
@@ -0,0 +1,57 @@
+title: '[AHM] Make pallet types public'
+doc:
+- audience: Runtime Dev
+  description: Preparation for AHM and making stuff public.
+crates:
+- name: cumulus-pallet-dmp-queue
+  bump: minor
+- name: cumulus-pallet-xcm
+  bump: minor
+- name: polkadot-runtime-common
+  bump: minor
+- name: polkadot-runtime-parachains
+  bump: minor
+- name: pallet-bags-list
+  bump: minor
+- name: pallet-conviction-voting
+  bump: minor
+- name: pallet-fast-unstake
+  bump: minor
+- name: pallet-multisig
+  bump: minor
+- name: pallet-nomination-pools
+  bump: minor
+- name: pallet-preimage
+  bump: minor
+- name: pallet-scheduler
+  bump: minor
+- name: pallet-vesting
+  bump: minor
+- name: staging-parachain-info
+  bump: minor
+- name: xcm-simulator
+  bump: minor
+- name: pallet-asset-conversion
+  bump: minor
+- name: pallet-assets-freezer
+  bump: minor
+- name: pallet-assets
+  bump: minor
+- name: pallet-authority-discovery
+  bump: minor
+- name: pallet-core-fellowship
+  bump: minor
+- name: pallet-delegated-staking
+  bump: minor
+- name: pallet-example-view-functions
+  bump: minor
+- name: pallet-salary
+  bump: minor
+- name: pallet-society
+  bump: minor
+- name: frame-support
+  bump: minor
+- name: pallet-treasury
+  bump: minor
+- name: pallet-uniques
+  bump: minor
diff --git a/prdoc/pr_7580.prdoc b/prdoc/pr_7580.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ba041355506e485d3b1543b8918320d07a7e9f6b
--- /dev/null
+++ b/prdoc/pr_7580.prdoc
@@ -0,0 +1,10 @@
+title: implement web3_clientVersion
+doc:
+- audience: Runtime Dev
+  description: |-
+    Implements the `web3_clientVersion` method. This is a common requirement for external Ethereum libraries when querying a client.
+
+    Reference issue with more details: https://github.com/paritytech/contract-issues/issues/26.
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/prdoc/pr_7581.prdoc b/prdoc/pr_7581.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..04ee5fbbff230198012ce8f2750c826a96286e9f
--- /dev/null
+++ b/prdoc/pr_7581.prdoc
@@ -0,0 +1,65 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Move validator disabling logic to pallet-session
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This decouples disabling logic from staking, and moves it to session. This ensures validators can be disabled
+      directly when staking transitions to the system parachain and offences are reported on RC, eliminating
+      cross-network hops.
+
+crates:
+- name: pallet-staking
+  bump: major
+- name: pallet-session
+  bump: major
+- name: pallet-authority-discovery
+  bump: patch
+- name: pallet-authority-discovery
+  bump: patch
+- name: pallet-babe
+  bump: patch
+- name: pallet-grandpa
+  bump: patch
+- name: westend-runtime
+  bump: minor
+- name: pallet-beefy
+  bump: patch
+- name: pallet-beefy-mmr
+  bump: patch
+- name: pallet-offences-benchmarking
+  bump: patch
+- name: pallet-im-online
+  bump: patch
+- name: pallet-session-benchmarking
+  bump: patch
+- name: rococo-runtime
+  bump: minor
+- name: pallet-collator-selection
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
+- name: asset-hub-rococo-runtime
+  bump: minor
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: bridge-hub-rococo-runtime
+  bump: minor
+- name: bridge-hub-westend-runtime
+  bump: minor
+- name: collectives-westend-runtime
+  bump: minor
+- name: coretime-rococo-runtime
+  bump: minor
+- name: coretime-westend-runtime
+  bump: minor
+- name: people-rococo-runtime
+  bump: minor
+- name: people-westend-runtime
+  bump: minor
+- name: penpal-runtime
+  bump: minor
+- name: contracts-rococo-runtime
+  bump: minor
diff --git a/prdoc/pr_7582.prdoc b/prdoc/pr_7582.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..26e594c4373f2e67db0ecbeb357c16c6913238a7
--- /dev/null
+++ b/prdoc/pr_7582.prdoc
@@ -0,0 +1,17 @@
+title: Implementation of `ah-client` and `rc-client` staking pallets
+doc:
+- audience: Runtime Dev
+  description: |-
+    This PR introduces the initial structure for `pallet-ah-client` and `pallet-rc-client`. These
+    pallets will reside on the relay chain and AssetHub, respectively, and will manage the interaction
+    between `pallet-session` on the relay chain and `pallet-staking` on AssetHub.
+    Both pallets are experimental and not intended for production use.
+crates:
+- name: pallet-staking-ah-client
+  bump: major
+- name: pallet-staking-rc-client
+  bump: major
+- name: pallet-election-provider-multi-block
+  bump: minor
+- name: pallet-staking
+  bump: major
diff --git a/prdoc/pr_7589.prdoc b/prdoc/pr_7589.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..da4eb3ca21927a1bb46e7bee2040e6f71e7f54ab
--- /dev/null
+++ b/prdoc/pr_7589.prdoc
@@ -0,0 +1,8 @@
+title: '[pallet-revive] rpc add --earliest-receipt-block'
+doc:
+- audience: Runtime Dev
+  description: "Add a cli option to skip searching receipts for blocks older than\
+    \ the specified limit\r\n"
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/prdoc/pr_7590.prdoc b/prdoc/pr_7590.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..531c5b7cef124d18c1ba6e13afbf8c186fbba843
--- /dev/null
+++ b/prdoc/pr_7590.prdoc
@@ -0,0 +1,7 @@
+title: '[pallet-revive] move exec tests'
+doc:
+- audience: Runtime Dev
+  description: Moving exec tests into a new file
+crates:
+- name: pallet-revive
+  bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 57b70f9d5498ea760f838c94a12172de03b6568a..b5e084da28ceefa9c0a6229e702cca52576b1395 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -107,7 +107,7 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160};
 use sp_inherents::{CheckInherentsResult, InherentData};
 use sp_runtime::{
 	curve::PiecewiseLinear,
-	generic, impl_opaque_keys,
+	generic, impl_opaque_keys, str_array as s,
 	traits::{
 		self, AccountIdConversion, BlakeTwo256, Block as BlockT, Bounded, ConvertInto,
 		MaybeConvert, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup,
@@ -116,6 +116,7 @@ use sp_runtime::{
 	ApplyExtrinsicResult, FixedPointNumber, FixedU128, MultiSignature, MultiSigner, Perbill,
 	Percent, Permill, Perquintill, RuntimeDebug,
 };
+use sp_std::{borrow::Cow, prelude::*};
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
@@ -675,8 +676,6 @@ impl_opaque_keys! {
 
 #[cfg(feature = "staking-playground")]
 pub mod staking_playground {
-	use pallet_staking::Exposure;
-
 	use super::*;
 
 	/// An adapter to make the chain work with --dev only, even though it is running a large staking
@@ -711,61 +710,43 @@ pub mod staking_playground {
 		}
 	}
 
-	impl pallet_session::historical::SessionManager<AccountId, Exposure<AccountId, Balance>>
-		for AliceAsOnlyValidator
-	{
+	impl pallet_session::historical::SessionManager<AccountId, ()> for AliceAsOnlyValidator {
 		fn end_session(end_index: sp_staking::SessionIndex) {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::end_session(end_index)
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::end_session(
+				end_index,
+			)
 		}
 
-		fn new_session(
-			new_index: sp_staking::SessionIndex,
-		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::new_session(new_index)
+		fn new_session(new_index: sp_staking::SessionIndex) -> Option<Vec<(AccountId, ())>> {
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::new_session(
+				new_index,
+			)
 			.map(|_ignored| {
 				// construct a fake exposure for alice.
-				vec![(
-					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
-					pallet_staking::Exposure {
-						total: 1_000_000_000,
-						own: 1_000_000_000,
-						others: vec![],
-					},
-				)]
+				vec![(sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), ())]
 			})
 		}
 
 		fn new_session_genesis(
 			new_index: sp_staking::SessionIndex,
-		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
+		) -> Option<Vec<(AccountId, ())>> {
 			<Staking as pallet_session::historical::SessionManager<
 				AccountId,
-				Exposure<AccountId, Balance>,
+				(),
 			>>::new_session_genesis(new_index)
 			.map(|_ignored| {
 				// construct a fake exposure for alice.
 				vec![(
 					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
-					pallet_staking::Exposure {
-						total: 1_000_000_000,
-						own: 1_000_000_000,
-						others: vec![],
-					},
+					(),
 				)]
 			})
 		}
 
 		fn start_session(start_index: sp_staking::SessionIndex) {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::start_session(start_index)
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::start_session(
+				start_index,
+			)
 		}
 	}
 }
@@ -778,6 +759,8 @@ impl pallet_session::Config for Runtime {
 	type NextSessionRotation = Babe;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
+
 	type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
 	#[cfg(not(feature = "staking-playground"))]
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
@@ -789,8 +772,8 @@ impl pallet_session::Config for Runtime {
 }
 
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 pallet_staking_reward_curve::build! {
@@ -893,7 +876,6 @@ impl pallet_staking::Config for Runtime {
 	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 	type BenchmarkingConfig = StakingBenchmarkingConfig;
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
@@ -1280,43 +1262,20 @@ pub struct TracksInfo;
 impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo {
 	type Id = u16;
 	type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] {
-		static DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 1] = [(
-			0u16,
-			pallet_referenda::TrackInfo {
-				name: "root",
-				max_deciding: 1,
-				decision_deposit: 10,
-				prepare_period: 4,
-				decision_period: 4,
-				confirm_period: 2,
-				min_enactment_period: 4,
-				min_approval: pallet_referenda::Curve::LinearDecreasing {
-					length: Perbill::from_percent(100),
-					floor: Perbill::from_percent(50),
-					ceil: Perbill::from_percent(100),
-				},
-				min_support: pallet_referenda::Curve::LinearDecreasing {
-					length: Perbill::from_percent(100),
-					floor: Perbill::from_percent(0),
-					ceil: Perbill::from_percent(100),
-				},
-			},
-		)];
-		&DATA[..]
+
+	fn tracks(
+	) -> impl Iterator<Item = Cow<'static, pallet_referenda::Track<Self::Id, Balance, BlockNumber>>>
+	{
+		dynamic_params::referenda::Tracks::get().into_iter().map(Cow::Owned)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
-		if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) {
-			match system_origin {
-				frame_system::RawOrigin::Root => Ok(0),
-				_ => Err(()),
-			}
-		} else {
-			Err(())
-		}
+		dynamic_params::referenda::Origins::get()
+			.iter()
+			.find(|(o, _)| id == o)
+			.map(|(_, track_id)| *track_id)
+			.ok_or(())
 	}
 }
-pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber);
 
 impl pallet_referenda::Config for Runtime {
 	type WeightInfo = pallet_referenda::weights::SubstrateWeight<Self>;
@@ -2676,6 +2635,46 @@ pub mod dynamic_params {
 		#[codec(index = 1)]
 		pub static ByteDeposit: Balance = 1 * CENTS;
 	}
+
+	#[dynamic_pallet_params]
+	#[codec(index = 1)]
+	pub mod referenda {
+		/// The configuration for the tracks
+		#[codec(index = 0)]
+		pub static Tracks: BoundedVec<
+			pallet_referenda::Track<u16, Balance, BlockNumber>,
+			ConstU32<100>,
+		> = BoundedVec::truncate_from(vec![pallet_referenda::Track {
+			id: 0u16,
+			info: pallet_referenda::TrackInfo {
+				name: s("root"),
+				max_deciding: 1,
+				decision_deposit: 10,
+				prepare_period: 4,
+				decision_period: 4,
+				confirm_period: 2,
+				min_enactment_period: 4,
+				min_approval: pallet_referenda::Curve::LinearDecreasing {
+					length: Perbill::from_percent(100),
+					floor: Perbill::from_percent(50),
+					ceil: Perbill::from_percent(100),
+				},
+				min_support: pallet_referenda::Curve::LinearDecreasing {
+					length: Perbill::from_percent(100),
+					floor: Perbill::from_percent(0),
+					ceil: Perbill::from_percent(100),
+				},
+			},
+		}]);
+
+		/// A list mapping every origin with a track Id
+		#[codec(index = 1)]
+		pub static Origins: BoundedVec<(OriginCaller, u16), ConstU32<100>> =
+			BoundedVec::truncate_from(vec![(
+				OriginCaller::system(frame_system::RawOrigin::Root),
+				0,
+			)]);
+	}
 }
 
 #[cfg(feature = "runtime-benchmarks")]
@@ -2701,6 +2700,10 @@ impl EnsureOriginWithArg<RuntimeOrigin, RuntimeParametersKey> for DynamicParamet
 				frame_system::ensure_root(origin.clone()).map_err(|_| origin)?;
 				return Ok(())
 			},
+			RuntimeParametersKey::Referenda(_) => {
+				frame_system::ensure_root(origin.clone()).map_err(|_| origin)?;
+				return Ok(())
+			},
 		}
 	}
 
diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs
index 748b84a50d2ae1cc60a8b5195e14e17615bc865a..c9be0b48d3443927a4267d53658994fc98f339db 100644
--- a/substrate/client/cli/src/params/network_params.rs
+++ b/substrate/client/cli/src/params/network_params.rs
@@ -275,7 +275,6 @@ impl NetworkParams {
 			allow_non_globals_in_dht,
 			kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths,
 			kademlia_replication_factor: self.kademlia_replication_factor,
-			yamux_window_size: None,
 			ipfs_server: self.ipfs_server,
 			sync_mode: self.sync.into(),
 			network_backend: self.network_backend.into(),
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index 327f79fe6c1306d3ca5f6912b480ea84ffcdb689..1a64f06e74c210f26994a90e54f28801f2a93295 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -650,27 +650,6 @@ pub struct NetworkConfiguration {
 	/// Enable serving block data over IPFS bitswap.
 	pub ipfs_server: bool,
 
-	/// Size of Yamux receive window of all substreams. `None` for the default (256kiB).
-	/// Any value less than 256kiB is invalid.
-	///
-	/// # Context
-	///
-	/// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes
-	/// to be transferred at a time, where `N` is the Yamux receive window size configurable here.
-	/// This means, in practice, that every `N` bytes must be acknowledged by the receiver before
-	/// the sender can send more data. The maximum bandwidth of each notifications substream is
-	/// therefore `N / round_trip_time`.
-	///
-	/// It is recommended to leave this to `None`, and use a request-response protocol instead if
-	/// a large amount of data must be transferred. The reason why the value is configurable is
-	/// that some Substrate users mis-use notification protocols to send large amounts of data.
-	/// As such, this option isn't designed to stay and will likely get removed in the future.
-	///
-	/// Note that configuring a value here isn't a modification of the Yamux protocol, but rather
-	/// a modification of the way the implementation works. Different nodes with different
-	/// configured values remain compatible with each other.
-	pub yamux_window_size: Option<u32>,
-
 	/// Networking backend used for P2P communication.
 	pub network_backend: NetworkBackendType,
 }
@@ -703,7 +682,6 @@ impl NetworkConfiguration {
 			kademlia_disjoint_query_paths: false,
 			kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR)
 				.expect("value is a constant; constant is non-zero; qed."),
-			yamux_window_size: None,
 			ipfs_server: false,
 			network_backend: NetworkBackendType::Libp2p,
 		}
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs
index 917449cf228c6ef8e8864699d5241f80158f25b7..6b958de86918f8bfa3e150c6aea150a13d1dc85c 100644
--- a/substrate/client/network/src/discovery.rs
+++ b/substrate/client/network/src/discovery.rs
@@ -84,6 +84,9 @@ use std::{
 	time::{Duration, Instant},
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::discovery";
+
 /// Maximum number of known external addresses that we will cache.
 /// This only affects whether we will log whenever we (re-)discover
 /// a given address.
@@ -262,7 +265,7 @@ impl DiscoveryConfig {
 				match TokioMdns::new(mdns::Config::default(), local_peer_id) {
 					Ok(mdns) => Toggle::from(Some(mdns)),
 					Err(err) => {
-						warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err);
+						warn!(target: LOG_TARGET, "Failed to initialize mDNS: {:?}", err);
 						Toggle::from(None)
 					},
 				}
@@ -375,7 +378,7 @@ impl DiscoveryBehaviour {
 		if let Some(kademlia) = self.kademlia.as_mut() {
 			if !self.allow_non_globals_in_dht && !Self::can_add_to_dht(&addr) {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Ignoring self-reported non-global address {} from {}.", addr, peer_id
 				);
 				return
@@ -393,7 +396,7 @@ impl DiscoveryBehaviour {
 					.expect("kademlia protocol was checked above to be enabled; qed")
 			}) {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Ignoring self-reported address {} from {} as remote node is not part of the \
 					 Kademlia DHT supported by the local node.", addr, peer_id,
 				);
@@ -401,7 +404,7 @@ impl DiscoveryBehaviour {
 			}
 
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Adding self-reported address {} from {} to Kademlia DHT.",
 				addr, peer_id
 			);
@@ -425,7 +428,7 @@ impl DiscoveryBehaviour {
 	pub fn put_value(&mut self, key: RecordKey, value: Vec<u8>) {
 		if let Some(k) = self.kademlia.as_mut() {
 			if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) {
-				warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e);
+				warn!(target: LOG_TARGET, "Libp2p => Failed to put record: {:?}", e);
 				self.pending_events
 					.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0)));
 			}
@@ -444,7 +447,7 @@ impl DiscoveryBehaviour {
 		if let Some(kad) = self.kademlia.as_mut() {
 			if update_local_storage {
 				if let Err(_e) = kad.store_mut().put(record.clone()) {
-					warn!(target: "sub-libp2p", "Failed to update local starage");
+					warn!(target: LOG_TARGET, "Failed to update local storage");
 				}
 			}
 
@@ -462,7 +465,7 @@ impl DiscoveryBehaviour {
 	pub fn start_providing(&mut self, key: RecordKey) {
 		if let Some(kad) = self.kademlia.as_mut() {
 			if let Err(e) = kad.start_providing(key.clone()) {
-				warn!(target: "sub-libp2p", "Libp2p => Failed to start providing {key:?}: {e}.");
+				warn!(target: LOG_TARGET, "Libp2p => Failed to start providing {key:?}: {e}.");
 				self.pending_events.push_back(DiscoveryOut::StartProvidingFailed(key));
 			}
 		}
@@ -498,7 +501,7 @@ impl DiscoveryBehaviour {
 				expires,
 			}) {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Failed to store record with key: {:?}",
 					err
 				);
@@ -712,7 +715,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 			});
 		}
 
-		trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list);
+		trace!(target: LOG_TARGET, "Addresses of {:?}: {:?}", peer_id, list);
 
 		Ok(list.into_iter().collect())
 	}
@@ -781,7 +784,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 				if let Some(Protocol::P2p(peer_id)) = addr.iter().last() {
 					if peer_id != self.local_peer_id {
 						warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"🔍 Discovered external address for a peer that is not us: {addr}",
 						);
 						// Ensure this address is not propagated to kademlia.
@@ -796,7 +799,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 					// in which case we just want to refrain from logging.
 					if self.known_external_addresses.insert(address.clone()) {
 						info!(
-						  target: "sub-libp2p",
+						  target: LOG_TARGET,
 						  "🔍 Discovered new external address for our node: {address}",
 						);
 					}
@@ -805,7 +808,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 				self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e));
 			},
 			event => {
-				debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}");
+				debug!(target: LOG_TARGET, "New unknown `FromSwarm` libp2p event: {event:?}");
 				self.kademlia.on_swarm_event(event);
 			},
 		}
@@ -834,7 +837,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						if self.num_connections < self.discovery_only_if_under_num {
 							let random_peer_id = PeerId::random();
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p <= Starting random Kademlia request for {:?}",
 								random_peer_id,
 							);
@@ -842,7 +845,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							true
 						} else {
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Kademlia paused due to high number of connections ({})",
 								self.num_connections
 							);
@@ -899,20 +902,20 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 					} => match res {
 						Err(GetClosestPeersError::Timeout { key, peers }) => {
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Query for {:?} timed out with {} results",
 								HexDisplay::from(&key), peers.len(),
 							);
 						},
 						Ok(ok) => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Query for {:?} yielded {:?} results",
 								HexDisplay::from(&ok.key), ok.peers.len(),
 							);
 							if ok.peers.is_empty() && self.num_connections != 0 {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Random Kademlia query has yielded empty results",
 								);
 							}
@@ -927,7 +930,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						let ev = match res {
 							Ok(GetRecordOk::FoundRecord(r)) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Found record ({:?}) with value: {:?} id {:?} stats {:?}",
 									r.record.key,
 									r.record.value,
@@ -959,7 +962,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								cache_candidates,
 							}) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Finished with no-additional-record {:?} stats {:?} took {:?} ms",
 									id,
 									stats,
@@ -986,7 +989,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => {
 								trace!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get record: {:?}",
 									e,
 								);
@@ -997,7 +1000,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(e) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get record: {:?}",
 									e,
 								);
@@ -1018,7 +1021,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						let ev = match res {
 							Ok(GetProvidersOk::FoundProviders { key, providers }) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Found providers {:?} for key {:?}, id {:?}, stats {:?}",
 									providers,
 									key,
@@ -1036,7 +1039,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								closest_peers: _,
 							}) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Finished with no additional providers {:?}, stats {:?}, took {:?} ms",
 									id,
 									stats,
@@ -1047,7 +1050,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(GetProvidersError::Timeout { key, closest_peers: _ }) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get providers for {key:?} due to timeout.",
 								);
 
@@ -1069,7 +1072,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()),
 							Err(e) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to put record: {:?}",
 									e,
 								);
@@ -1086,12 +1089,12 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						..
 					} => match res {
 						Ok(ok) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => Record republished: {:?}",
 							ok.key,
 						),
 						Err(e) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => Republishing of record {:?} failed with: {:?}",
 							e.key(), e,
 						),
@@ -1101,20 +1104,20 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						..
 					} => match res {
 						Ok(ok) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => DHT bootstrap progressed: {ok:?}",
 						),
 						Err(e) => warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => DHT bootstrap error: {e:?}",
 						),
 					},
 					// We never start any other type of query.
 					KademliaEvent::OutboundQueryProgressed { result: e, .. } => {
-						warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e)
+						warn!(target: LOG_TARGET, "Libp2p => Unhandled Kademlia event: {:?}", e)
 					},
 					Event::ModeChanged { new_mode } => {
-						debug!(target: "sub-libp2p", "Libp2p => Kademlia mode changed: {new_mode}")
+						debug!(target: LOG_TARGET, "Libp2p => Kademlia mode changed: {new_mode}")
 					},
 				},
 				ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }),
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
index e8e132228ca8f82d1a32ff2508b5fe84efc242ca..4f105936ac5632d40a648a2fe11857dc3d110c48 100644
--- a/substrate/client/network/src/litep2p/mod.rs
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -321,10 +321,6 @@ impl Litep2pNetworkBackend {
 			yamux_config.set_window_update_mode(litep2p::yamux::WindowUpdateMode::OnRead);
 			yamux_config.set_max_buffer_size(yamux_maximum_buffer_size);
 
-			if let Some(yamux_window_size) = config.network_config.yamux_window_size {
-				yamux_config.set_receive_window(yamux_window_size);
-			}
-
 			yamux_config
 		};
 
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index e6909fcdefeaf874b5014dc24f41174182767b81..217ef304bd0fc6c96fffc33fdb08b8ae38a1c47d 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -60,13 +60,13 @@ use std::{
 	time::{Duration, Instant},
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::behaviour";
+
 /// Type representing a pending substream validation.
 type PendingInboundValidation =
 	BoxFuture<'static, (Result<ValidationResult, RecvError>, IncomingIndex)>;
 
-/// Logging target for the file.
-const LOG_TARGET: &str = "sub-libp2p";
-
 /// Network behaviour that handles opening substreams for custom protocols with other peers.
 ///
 /// # How it works
@@ -470,7 +470,7 @@ impl Notifications {
 		if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) {
 			*p.handshake.write() = handshake_message.into();
 		} else {
-			log::error!(target: "sub-libp2p", "Unknown handshake change set: {:?}", set_id);
+			log::error!(target: LOG_TARGET, "Unknown handshake change set: {:?}", set_id);
 			debug_assert!(false);
 		}
 	}
@@ -487,7 +487,7 @@ impl Notifications {
 
 	/// Disconnects the given peer if we are connected to it.
 	pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: SetId) {
-		trace!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id);
+		trace!(target: LOG_TARGET, "External API => Disconnect({}, {:?})", peer_id, set_id);
 		self.disconnect_peer_inner(peer_id, set_id);
 	}
 
@@ -508,7 +508,7 @@ impl Notifications {
 
 			// DisabledPendingEnable => Disabled.
 			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
-				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+				trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 				self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id);
 				*entry.into_mut() =
 					PeerState::Disabled { connections, backoff_until: Some(timer_deadline) }
@@ -518,11 +518,11 @@ impl Notifications {
 			// All open or opening connections are sent a `Close` message.
 			// If relevant, the external API is instantly notified.
 			PeerState::Enabled { mut connections } => {
-				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+				trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 				self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id);
 
 				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
-					trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", peer_id, set_id);
 					let event =
 						NotificationsOut::CustomProtocolClosed { peer_id: *peer_id, set_id };
 					self.events.push_back(ToSwarm::GenerateEvent(event));
@@ -531,7 +531,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -543,7 +543,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -573,7 +573,7 @@ impl Notifications {
 					inc
 				} else {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"State mismatch in libp2p: no entry in incoming for incoming peer"
 					);
 					return
@@ -585,7 +585,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -601,7 +601,7 @@ impl Notifications {
 			},
 
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id)
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", peer_id)
 			},
 		}
 	}
@@ -614,12 +614,12 @@ impl Notifications {
 			Entry::Vacant(entry) => {
 				// If there's no entry in `self.peers`, start dialing.
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Starting to connect",
 					entry.key().0,
 					set_id,
 				);
-				trace!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0);
+				trace!(target: LOG_TARGET, "Libp2p <= Dial {}", entry.key().0);
 				self.events.push_back(ToSwarm::Dial { opts: entry.key().0.into() });
 				entry.insert(PeerState::Requested);
 				return
@@ -633,7 +633,7 @@ impl Notifications {
 			PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => {
 				let peer_id = occ_entry.key().0;
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Will start to connect at until {:?}",
 					peer_id,
 					set_id,
@@ -646,12 +646,12 @@ impl Notifications {
 			// Backoff (expired) => Requested
 			PeerState::Backoff { .. } => {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Starting to connect",
 					occ_entry.key().0,
 					set_id,
 				);
-				trace!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key());
+				trace!(target: LOG_TARGET, "Libp2p <= Dial {:?}", occ_entry.key());
 				self.events.push_back(ToSwarm::Dial { opts: occ_entry.key().0.into() });
 				*occ_entry.into_mut() = PeerState::Requested;
 			},
@@ -662,7 +662,7 @@ impl Notifications {
 			{
 				let peer_id = occ_entry.key().0;
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): But peer is backed-off until {:?}",
 					peer_id,
 					set_id,
@@ -697,9 +697,9 @@ impl Notifications {
 				if let Some((connec_id, connec_state)) =
 					connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed))
 				{
-					trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.",
+					trace!(target: LOG_TARGET, "PSM => Connect({}, {:?}): Enabling connections.",
 						occ_entry.key().0, set_id);
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -714,7 +714,7 @@ impl Notifications {
 						matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing)
 					}));
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Connect({}, {:?}): No connection in proper state. Delaying.",
 						occ_entry.key().0, set_id
 					);
@@ -750,7 +750,7 @@ impl Notifications {
 			// Incoming => Incoming
 			st @ PeerState::Incoming { .. } => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Ignoring obsolete connect, we are awaiting accept/reject.",
 					occ_entry.key().0, set_id
 				);
@@ -759,26 +759,26 @@ impl Notifications {
 
 			// Other states are kept as-is.
 			st @ PeerState::Enabled { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Already connected.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 			st @ PeerState::DisabledPendingEnable { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Already pending enabling.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 			st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Duplicate request.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key());
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", occ_entry.key());
 				debug_assert!(false);
 			},
 		}
@@ -789,7 +789,7 @@ impl Notifications {
 		let mut entry = match self.peers.entry((peer_id, set_id)) {
 			Entry::Occupied(entry) => entry,
 			Entry::Vacant(entry) => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Already disabled.",
 					entry.key().0, set_id);
 				return
 			},
@@ -797,7 +797,7 @@ impl Notifications {
 
 		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 			st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Already disabled.",
 					entry.key().0, set_id);
 				*entry.into_mut() = st;
 			},
@@ -805,7 +805,7 @@ impl Notifications {
 			// DisabledPendingEnable => Disabled
 			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
 				debug_assert!(!connections.is_empty());
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"PSM => Drop({}, {:?}): Interrupting pending enabling.",
 					entry.key().0, set_id);
 				*entry.into_mut() =
@@ -814,7 +814,7 @@ impl Notifications {
 
 			// Enabled => Disabled
 			PeerState::Enabled { mut connections } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Disabling connections.",
 					entry.key().0, set_id);
 
 				debug_assert!(connections.iter().any(|(_, s)| matches!(
@@ -823,7 +823,7 @@ impl Notifications {
 				)));
 
 				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
-					trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id);
+					trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", entry.key().0, set_id);
 					let event =
 						NotificationsOut::CustomProtocolClosed { peer_id: entry.key().0, set_id };
 					self.events.push_back(ToSwarm::GenerateEvent(event));
@@ -832,7 +832,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						entry.key(), *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: entry.key().0,
@@ -845,7 +845,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						entry.key(), *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: entry.key().0,
@@ -863,14 +863,14 @@ impl Notifications {
 				// We don't cancel dialing. Libp2p doesn't expose that on purpose, as other
 				// sub-systems (such as the discovery mechanism) may require dialing this peer as
 				// well at the same time.
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Not yet connected.",
 					entry.key().0, set_id);
 				entry.remove();
 			},
 
 			// PendingRequest => Backoff
 			PeerState::PendingRequest { timer, timer_deadline } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Not yet connected",
 					entry.key().0, set_id);
 				*entry.into_mut() = PeerState::Backoff { timer, timer_deadline }
 			},
@@ -880,7 +880,7 @@ impl Notifications {
 			// the protocol, reject the substream
 			PeerState::Incoming { backoff_until, connections, incoming_index, .. } => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.",
 					entry.key().0, set_id,
 				);
@@ -892,7 +892,7 @@ impl Notifications {
 				};
 			},
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key());
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", entry.key());
 				debug_assert!(false);
 			},
 		}
@@ -944,19 +944,19 @@ impl Notifications {
 			if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) {
 				(pos, self.incoming.get(pos))
 			} else {
-				error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index);
+				error!(target: LOG_TARGET, "PSM => Accept({:?}): Invalid index", index);
 				return
 			};
 
 		let Some(incoming) = incoming else {
-			error!(target: "sub-libp2p", "Incoming connection ({:?}) doesn't exist", index);
+			error!(target: LOG_TARGET, "Incoming connection ({:?}) doesn't exist", index);
 			debug_assert!(false);
 			return;
 		};
 
 		if !incoming.alive {
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"PSM => Accept({:?}, {}, {:?}): Obsolete incoming",
 				index,
 				incoming.peer_id,
@@ -967,7 +967,7 @@ impl Notifications {
 				Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => {
 				},
 				_ => {
-					trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})",
+					trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})",
 						incoming.peer_id, incoming.set_id);
 					self.protocol_controller_handles[usize::from(incoming.set_id)]
 						.dropped(incoming.peer_id);
@@ -982,7 +982,7 @@ impl Notifications {
 			Some(s) => s,
 			None => {
 				log::debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Connection to {:?} closed, ({:?} {:?}), ignoring accept",
 					incoming.peer_id,
 					incoming.set_id,
@@ -1003,7 +1003,7 @@ impl Notifications {
 			} => {
 				if index < incoming_index {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1012,7 +1012,7 @@ impl Notifications {
 					return
 				} else if index > incoming_index {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1026,7 +1026,7 @@ impl Notifications {
 				// for the it to be closed so reject the substream now
 				if peerset_rejected {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Protocol accepted ({:?} {:?} {:?}) but Peerset had request disconnection, rejecting",
 						index,
 						incoming.peer_id,
@@ -1043,7 +1043,7 @@ impl Notifications {
 				}
 
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Accept({:?}, {}, {:?}): Enabling connections.",
 					index,
 					incoming.peer_id,
@@ -1057,7 +1057,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 						incoming.peer_id, *connec_id, incoming.set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: incoming.peer_id,
@@ -1077,7 +1077,7 @@ impl Notifications {
 			// Any state other than `Incoming` is invalid.
 			peer => {
 				error!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
 					peer
 				);
@@ -1106,13 +1106,13 @@ impl Notifications {
 		{
 			self.incoming.remove(pos)
 		} else {
-			error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index);
+			error!(target: LOG_TARGET, "PSM => Reject({:?}): Invalid index", index);
 			return None
 		};
 
 		if !incoming.alive {
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"PSM => Reject({:?}, {}, {:?}): Obsolete incoming, ignoring",
 				index,
 				incoming.peer_id,
@@ -1126,7 +1126,7 @@ impl Notifications {
 			Some(s) => s,
 			None => {
 				log::debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Connection to {:?} closed, ({:?} {:?}), ignoring accept",
 					incoming.peer_id,
 					incoming.set_id,
@@ -1141,14 +1141,14 @@ impl Notifications {
 			PeerState::Incoming { mut connections, backoff_until, incoming_index, .. } => {
 				if index < incoming_index {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
 					return None
 				} else if index > incoming_index {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Reject({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1156,7 +1156,7 @@ impl Notifications {
 					return None
 				}
 
-				trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
+				trace!(target: LOG_TARGET, "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
 					index, incoming.peer_id, incoming.set_id);
 
 				debug_assert!(connections
@@ -1166,7 +1166,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						incoming.peer_id, connec_id, incoming.set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: incoming.peer_id,
@@ -1252,11 +1252,11 @@ impl NetworkBehaviour for Notifications {
 						// Requested | PendingRequest => Enabled
 						st @ &mut PeerState::Requested |
 						st @ &mut PeerState::PendingRequest { .. } => {
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.",
 								peer_id, set_id, endpoint
 							);
-							trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, connection_id, set_id);
+							trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})", peer_id, connection_id, set_id);
 							self.events.push_back(ToSwarm::NotifyHandler {
 								peer_id,
 								handler: NotifyHandler::One(connection_id),
@@ -1277,7 +1277,7 @@ impl NetworkBehaviour for Notifications {
 								} else {
 									None
 								};
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}, {:?}): Not requested by PSM, disabling.",
 								peer_id, set_id, endpoint, connection_id);
 
@@ -1292,7 +1292,7 @@ impl NetworkBehaviour for Notifications {
 						PeerState::Disabled { connections, .. } |
 						PeerState::DisabledPendingEnable { connections, .. } |
 						PeerState::Enabled { connections, .. } => {
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.",
 								peer_id, set_id, endpoint, connection_id);
 							connections.push((connection_id, ConnectionState::Closed));
@@ -1307,7 +1307,7 @@ impl NetworkBehaviour for Notifications {
 					{
 						entry
 					} else {
-						error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler");
+						error!(target: LOG_TARGET, "inject_connection_closed: State mismatch in the custom protos handler");
 						debug_assert!(false);
 						return
 					};
@@ -1315,7 +1315,7 @@ impl NetworkBehaviour for Notifications {
 					match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 						// Disabled => Disabled | Backoff | Ø
 						PeerState::Disabled { mut connections, backoff_until } => {
-							trace!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.",
+							trace!(target: LOG_TARGET, "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.",
 								peer_id, set_id, connection_id);
 
 							if let Some(pos) =
@@ -1324,7 +1324,7 @@ impl NetworkBehaviour for Notifications {
 								connections.remove(pos);
 							} else {
 								debug_assert!(false);
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 							}
 
@@ -1366,7 +1366,7 @@ impl NetworkBehaviour for Notifications {
 							timer,
 						} => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): Disabled but pending enable.",
 								peer_id, set_id, connection_id
 							);
@@ -1376,13 +1376,13 @@ impl NetworkBehaviour for Notifications {
 							{
 								connections.remove(pos);
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
 
 							if connections.is_empty() {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								*entry.get_mut() = PeerState::Backoff { timer, timer_deadline };
@@ -1403,7 +1403,7 @@ impl NetworkBehaviour for Notifications {
 							peerset_rejected,
 						} => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.",
 								peer_id, set_id, connection_id
 							);
@@ -1417,7 +1417,7 @@ impl NetworkBehaviour for Notifications {
 							{
 								connections.remove(pos);
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
@@ -1439,7 +1439,7 @@ impl NetworkBehaviour for Notifications {
 								{
 									state.alive = false;
 								} else {
-									error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \
+									error!(target: LOG_TARGET, "State mismatch in libp2p: no entry in \
 										incoming corresponding to an incoming state in peers");
 									debug_assert!(false);
 								}
@@ -1489,7 +1489,7 @@ impl NetworkBehaviour for Notifications {
 						// Peers are always backed-off when disconnecting while Enabled.
 						PeerState::Enabled { mut connections } => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): Enabled.",
 								peer_id, set_id, connection_id
 							);
@@ -1513,7 +1513,7 @@ impl NetworkBehaviour for Notifications {
 										}) {
 										if pos <= replacement_pos {
 											trace!(
-												target: "sub-libp2p",
+												target: LOG_TARGET,
 												"External API <= Sink replaced({}, {:?})",
 												peer_id, set_id
 											);
@@ -1526,7 +1526,7 @@ impl NetworkBehaviour for Notifications {
 										}
 									} else {
 										trace!(
-											target: "sub-libp2p", "External API <= Closed({}, {:?})",
+											target: LOG_TARGET, "External API <= Closed({}, {:?})",
 											peer_id, set_id
 										);
 										let event = NotificationsOut::CustomProtocolClosed {
@@ -1537,13 +1537,13 @@ impl NetworkBehaviour for Notifications {
 									}
 								}
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
 
 							if connections.is_empty() {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
@@ -1566,7 +1566,7 @@ impl NetworkBehaviour for Notifications {
 							} else if !connections.iter().any(|(_, s)| {
 								matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
 							}) {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 
@@ -1581,13 +1581,13 @@ impl NetworkBehaviour for Notifications {
 						PeerState::PendingRequest { .. } |
 						PeerState::Backoff { .. } => {
 							// This is a serious bug either in this state machine or in libp2p.
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"`inject_connection_closed` called for unknown peer {}",
 								peer_id);
 							debug_assert!(false);
 						},
 						PeerState::Poisoned => {
-							error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id);
+							error!(target: LOG_TARGET, "State of peer {} is poisoned", peer_id);
 							debug_assert!(false);
 						},
 					}
@@ -1596,12 +1596,12 @@ impl NetworkBehaviour for Notifications {
 			FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => {
 				if let DialError::Transport(errors) = error {
 					for (addr, error) in errors.iter() {
-						trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
+						trace!(target: LOG_TARGET, "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
 					}
 				}
 
 				if let Some(peer_id) = peer_id {
-					trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id);
+					trace!(target: LOG_TARGET, "Libp2p => Dial failure for {:?}", peer_id);
 
 					for set_id in (0..self.notif_protocols.len()).map(SetId::from) {
 						if let Entry::Occupied(mut entry) = self.peers.entry((peer_id, set_id)) {
@@ -1615,7 +1615,7 @@ impl NetworkBehaviour for Notifications {
 								// requested.
 								st @ PeerState::Requested |
 								st @ PeerState::PendingRequest { .. } => {
-									trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+									trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 									self.protocol_controller_handles[usize::from(set_id)]
 										.dropped(peer_id);
 
@@ -1654,7 +1654,7 @@ impl NetworkBehaviour for Notifications {
 								},
 
 								PeerState::Poisoned => {
-									error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id);
+									error!(target: LOG_TARGET, "State of {:?} is poisoned", peer_id);
 									debug_assert!(false);
 								},
 							}
@@ -1673,7 +1673,7 @@ impl NetworkBehaviour for Notifications {
 			FromSwarm::AddressChange(_) => {},
 			FromSwarm::NewListenAddr(_) => {},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}");
+				warn!(target: LOG_TARGET, "New unknown `FromSwarm` libp2p event: {event:?}");
 			},
 		}
 	}
@@ -1688,7 +1688,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})",
 					peer_id, connection_id, set_id);
 
@@ -1697,7 +1697,7 @@ impl NetworkBehaviour for Notifications {
 					entry
 				} else {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"OpenDesiredByRemote: State mismatch in the custom protos handler"
 					);
 					debug_assert!(false);
@@ -1733,7 +1733,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1757,7 +1757,7 @@ impl NetworkBehaviour for Notifications {
 							connections.iter_mut().find(|(c, _)| *c == connection_id)
 						{
 							if let ConnectionState::Closed = *connec_state {
-								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+								trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 									peer_id, connection_id, set_id);
 								self.events.push_back(ToSwarm::NotifyHandler {
 									peer_id,
@@ -1779,7 +1779,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1799,7 +1799,7 @@ impl NetworkBehaviour for Notifications {
 								let incoming_id = self.next_incoming_index;
 								self.next_incoming_index.0 += 1;
 
-								trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}, {:?}).",
+								trace!(target: LOG_TARGET, "PSM <= Incoming({}, {:?}, {:?}).",
 									peer_id, set_id, incoming_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.incoming_connection(peer_id, incoming_id);
@@ -1831,7 +1831,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1844,7 +1844,7 @@ impl NetworkBehaviour for Notifications {
 							connections.iter_mut().find(|(c, _)| *c == connection_id)
 						{
 							if let ConnectionState::Closed = *connec_state {
-								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+								trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 									peer_id, connection_id, set_id);
 								self.events.push_back(ToSwarm::NotifyHandler {
 									peer_id,
@@ -1871,7 +1871,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1879,7 +1879,7 @@ impl NetworkBehaviour for Notifications {
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -1890,7 +1890,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::CloseDesired { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => CloseDesired({:?})",
 					peer_id, connection_id, set_id);
 
@@ -1898,7 +1898,7 @@ impl NetworkBehaviour for Notifications {
 				{
 					entry
 				} else {
-					error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler");
+					error!(target: LOG_TARGET, "CloseDesired: State mismatch in the custom protos handler");
 					debug_assert!(false);
 					return
 				};
@@ -1916,7 +1916,7 @@ impl NetworkBehaviour for Notifications {
 						{
 							pos
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"CloseDesired: State mismatch in the custom protos handler");
 							debug_assert!(false);
 							return
@@ -1930,7 +1930,7 @@ impl NetworkBehaviour for Notifications {
 						debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_)));
 						connections[pos].1 = ConnectionState::Closing;
 
-						trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", peer_id, connection_id, set_id);
+						trace!(target: LOG_TARGET, "Handler({}, {:?}) <= Close({:?})", peer_id, connection_id, set_id);
 						self.events.push_back(ToSwarm::NotifyHandler {
 							peer_id,
 							handler: NotifyHandler::One(connection_id),
@@ -1943,7 +1943,7 @@ impl NetworkBehaviour for Notifications {
 								_ => None,
 							}) {
 							if pos <= replacement_pos {
-								trace!(target: "sub-libp2p", "External API <= Sink replaced({:?}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "External API <= Sink replaced({:?}, {:?})", peer_id, set_id);
 								let event = NotificationsOut::CustomProtocolReplaced {
 									peer_id,
 									set_id,
@@ -1959,7 +1959,7 @@ impl NetworkBehaviour for Notifications {
 								.iter()
 								.any(|(_, s)| matches!(s, ConnectionState::Opening))
 							{
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								*entry.into_mut() =
@@ -1968,7 +1968,7 @@ impl NetworkBehaviour for Notifications {
 								*entry.into_mut() = PeerState::Enabled { connections };
 							}
 
-							trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id);
+							trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", peer_id, set_id);
 							let event = NotificationsOut::CustomProtocolClosed { peer_id, set_id };
 							self.events.push_back(ToSwarm::GenerateEvent(event));
 						}
@@ -1981,7 +1981,7 @@ impl NetworkBehaviour for Notifications {
 						*entry.into_mut() = state;
 					},
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 					},
@@ -1991,7 +1991,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::CloseResult { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => CloseResult({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2006,14 +2006,14 @@ impl NetworkBehaviour for Notifications {
 						}) {
 							*connec_state = ConnectionState::Closed;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"CloseResult: State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "CloseResult: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -2030,7 +2030,7 @@ impl NetworkBehaviour for Notifications {
 				..
 			} => {
 				let set_id = SetId::from(protocol_index);
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => OpenResultOk({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2047,7 +2047,7 @@ impl NetworkBehaviour for Notifications {
 							*c == connection_id && matches!(s, ConnectionState::Opening)
 						}) {
 							if !any_open {
-								trace!(target: "sub-libp2p", "External API <= Open({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "External API <= Open({}, {:?})", peer_id, set_id);
 								let event = NotificationsOut::CustomProtocolOpen {
 									peer_id,
 									set_id,
@@ -2070,7 +2070,7 @@ impl NetworkBehaviour for Notifications {
 							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultOk State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
@@ -2084,14 +2084,14 @@ impl NetworkBehaviour for Notifications {
 						}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultOk State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "OpenResultOk: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -2101,7 +2101,7 @@ impl NetworkBehaviour for Notifications {
 
 			NotifsHandlerOut::OpenResultErr { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({:?}, {:?}) => OpenResultErr({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2109,7 +2109,7 @@ impl NetworkBehaviour for Notifications {
 				{
 					entry
 				} else {
-					error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler");
+					error!(target: LOG_TARGET, "OpenResultErr: State mismatch in the custom protos handler");
 					debug_assert!(false);
 					return
 				};
@@ -2132,7 +2132,7 @@ impl NetworkBehaviour for Notifications {
 							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultErr: State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
@@ -2140,7 +2140,7 @@ impl NetworkBehaviour for Notifications {
 						if !connections.iter().any(|(_, s)| {
 							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
 						}) {
-							trace!(target: "sub-libp2p", "PSM <= Dropped({:?}, {:?})", peer_id, set_id);
+							trace!(target: LOG_TARGET, "PSM <= Dropped({:?}, {:?})", peer_id, set_id);
 							self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id);
 
 							let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
@@ -2166,7 +2166,7 @@ impl NetworkBehaviour for Notifications {
 									}) {
 									*connec_state = ConnectionState::Closing;
 								} else {
-									error!(target: "sub-libp2p",
+									error!(target: LOG_TARGET,
 										"OpenResultErr: State mismatch in the custom protos handler");
 									debug_assert!(false);
 								}
@@ -2180,7 +2180,7 @@ impl NetworkBehaviour for Notifications {
 						*entry.into_mut() = state;
 					},
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 						debug_assert!(false);
@@ -2192,7 +2192,7 @@ impl NetworkBehaviour for Notifications {
 				let set_id = SetId::from(protocol_index);
 				if self.is_open(&peer_id, set_id) {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Handler({:?}) => Notification({}, {:?}, {} bytes)",
 						connection_id,
 						peer_id,
@@ -2200,7 +2200,7 @@ impl NetworkBehaviour for Notifications {
 						message.len()
 					);
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"External API <= Message({}, {:?})",
 						peer_id,
 						set_id,
@@ -2213,7 +2213,7 @@ impl NetworkBehaviour for Notifications {
 					self.events.push_back(ToSwarm::GenerateEvent(event));
 				} else {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Handler({:?}) => Post-close notification({}, {:?}, {} bytes)",
 						connection_id,
 						peer_id,
@@ -2225,7 +2225,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::Close { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p", "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id);
+				trace!(target: LOG_TARGET, "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id);
 				self.events.push_back(ToSwarm::CloseConnection {
 					peer_id,
 					connection: CloseConnection::One(connection_id),
@@ -2256,7 +2256,7 @@ impl NetworkBehaviour for Notifications {
 				},
 				Poll::Ready(None) => {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Protocol controllers receiver stream has returned `None`. Ignore this error if the node is shutting down.",
 					);
 					break
@@ -2314,12 +2314,12 @@ impl NetworkBehaviour for Notifications {
 
 			match peer_state {
 				PeerState::Backoff { timer, .. } if *timer == delay_id => {
-					trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state ({:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "Libp2p <= Clean up ban of {:?} from the state ({:?})", peer_id, set_id);
 					self.peers.remove(&(peer_id, set_id));
 				},
 
 				PeerState::PendingRequest { timer, .. } if *timer == delay_id => {
-					trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired ({:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "Libp2p <= Dial {:?} now that ban has expired ({:?})", peer_id, set_id);
 					self.events.push_back(ToSwarm::Dial { opts: peer_id.into() });
 					*peer_state = PeerState::Requested;
 				},
@@ -2331,7 +2331,7 @@ impl NetworkBehaviour for Notifications {
 					if let Some((connec_id, connec_state)) =
 						connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed))
 					{
-						trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)",
+						trace!(target: LOG_TARGET, "Handler({}, {:?}) <= Open({:?}) (ban expired)",
 							peer_id, *connec_id, set_id);
 						self.events.push_back(ToSwarm::NotifyHandler {
 							peer_id,
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index 332de9f19c410f8ac2c9914dfd7d25e6215b7c01..416a35ad88c9ac1cf24675c37904998bb8d943f3 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -79,7 +79,7 @@ use libp2p::{
 	},
 	PeerId,
 };
-use log::{error, warn};
+
 use parking_lot::{Mutex, RwLock};
 use std::{
 	collections::VecDeque,
@@ -90,6 +90,9 @@ use std::{
 	time::Duration,
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::handler";
+
 /// Number of pending notifications in asynchronous contexts.
 /// See [`NotificationsSink::reserve_notification`] for context.
 pub(crate) const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
@@ -561,7 +564,7 @@ impl ConnectionHandler for NotifsHandler {
 						*pending_opening = false;
 					},
 					State::Open { .. } => {
-						error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler");
+						log::error!(target: LOG_TARGET, "☎️ State mismatch in notifications handler");
 						debug_assert!(false);
 					},
 					State::Opening { ref mut in_substream, inbound } => {
@@ -622,7 +625,7 @@ impl ConnectionHandler for NotifsHandler {
 			},
 			ConnectionEvent::ListenUpgradeError(_listen_upgrade_error) => {},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown `ConnectionEvent` libp2p event: {event:?}");
+				log::warn!(target: LOG_TARGET, "New unknown `ConnectionEvent` libp2p event: {event:?}");
 			},
 		}
 	}
@@ -686,7 +689,7 @@ impl ConnectionHandler for NotifsHandler {
 					State::Opening { .. } | State::Open { .. } => {
 						// As documented, it is forbidden to send an `Open` while there is already
 						// one in the fly.
-						error!(target: "sub-libp2p", "opening already-opened handler");
+						log::error!(target: LOG_TARGET, "opening already-opened handler");
 						debug_assert!(false);
 					},
 				}
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
index a7eb31fc5795d6ee25d5769292428f40f3237d65..fe88a8793766e5b861bedcd5b3d3ffb87a8b4c96 100644
--- a/substrate/client/network/src/protocol/notifications/service/mod.rs
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -49,7 +49,7 @@ pub(crate) mod metrics;
 mod tests;
 
 /// Logging target for the file.
-const LOG_TARGET: &str = "sub-libp2p";
+const LOG_TARGET: &str = "sub-libp2p::notification::service";
 
 /// Default command queue size.
 const COMMAND_QUEUE_SIZE: usize = 64;
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 9e8a03fc07c9c3490394428c355364f7a0065345..b4d0de171a183935d38c928dc5b935b95d42e2e6 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -50,6 +50,9 @@ use std::{
 	vec,
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::upgrade";
+
 /// Maximum allowed size of the two handshake messages, in bytes.
 const MAX_HANDSHAKE_SIZE: usize = 1024;
 
@@ -210,7 +213,7 @@ where
 	/// Sends the handshake in order to inform the remote that we accept the substream.
 	pub fn send_handshake(&mut self, message: impl Into<Vec<u8>>) {
 		if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) {
-			error!(target: "sub-libp2p", "Tried to send handshake twice");
+			error!(target: LOG_TARGET, "Tried to send handshake twice");
 			return
 		}
 
@@ -349,7 +352,7 @@ impl NotificationsOut {
 	) -> Self {
 		let initial_message = initial_message.into();
 		if initial_message.len() > MAX_HANDSHAKE_SIZE {
-			error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit");
+			error!(target: LOG_TARGET, "Outbound networking handshake is above allowed protocol limit");
 		}
 
 		let mut protocol_names = fallback_names;
@@ -464,7 +467,7 @@ where
 			Poll::Pending => {},
 			Poll::Ready(Some(_)) => {
 				error!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Unexpected incoming data in `NotificationsOutSubstream`",
 				);
 			},
diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs
index e21773632ed7756185873429187fb5a595326040..ac87224549f97f18fa740020bea578b2e979c2a2 100644
--- a/substrate/client/network/src/request_responses.rs
+++ b/substrate/client/network/src/request_responses.rs
@@ -64,6 +64,9 @@ use std::{
 
 pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId};
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::request-response";
+
 /// Periodically check if requests are taking too long.
 const PERIODIC_REQUEST_CHECK: Duration = Duration::from_secs(2);
 
@@ -461,7 +464,7 @@ impl RequestResponsesBehaviour {
 		pending_response: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
 		connect: IfDisconnected,
 	) {
-		log::trace!(target: "sub-libp2p", "send request to {target} ({protocol_name:?}), {} bytes", request.len());
+		log::trace!(target: LOG_TARGET, "send request to {target} ({protocol_name:?}), {} bytes", request.len());
 
 		if let Some(ProtocolDetails { behaviour, .. }) =
 			self.protocols.get_mut(protocol_name.deref())
@@ -478,7 +481,7 @@ impl RequestResponsesBehaviour {
 			)
 		} else if pending_response.send(Err(RequestFailure::UnknownProtocol)).is_err() {
 			log::debug!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Unknown protocol {:?}. At the same time local \
 				 node is no longer interested in the result.",
 				protocol_name,
@@ -509,7 +512,7 @@ impl RequestResponsesBehaviour {
 			debug_assert!(prev_req_id.is_none(), "Expect request id to be unique.");
 		} else if pending_response.send(Err(RequestFailure::NotConnected)).is_err() {
 			log::debug!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Not connected to peer {:?}. At the same time local \
 				 node is no longer interested in the result.",
 				target,
@@ -615,7 +618,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 			return behaviour.on_connection_handler_event(peer_id, connection_id, event.1)
 		} else {
 			log::warn!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"on_connection_handler_event: no request-response instance registered for protocol {:?}",
 				p_name
 			);
@@ -631,14 +634,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 						self.protocols.get(&id.protocol)
 					else {
 						log::warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Request {id:?} has no protocol registered.",
 						);
 
 						if let Some(response_tx) = req.response_tx.take() {
 							if response_tx.send(Err(RequestFailure::UnknownProtocol)).is_err() {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Request {id:?} has no protocol registered. At the same time local node is no longer interested in the result.",
 								);
 							}
@@ -649,14 +652,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 					let elapsed = req.started_at.elapsed();
 					if elapsed > *request_timeout {
 						log::debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Request {id:?} force detected as timeout.",
 						);
 
 						if let Some(response_tx) = req.response_tx.take() {
 							if response_tx.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).is_err() {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Request {id:?} force detected as timeout. At the same time local node is no longer interested in the result.",
 								);
 							}
@@ -688,13 +691,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 					if let Some(ProtocolDetails { behaviour, .. }) =
 						self.protocols.get_mut(&*protocol_name)
 					{
-						log::trace!(target: "sub-libp2p", "send response to {peer} ({protocol_name:?}), {} bytes", payload.len());
+						log::trace!(target: LOG_TARGET, "send response to {peer} ({protocol_name:?}), {} bytes", payload.len());
 
 						if behaviour.send_response(inner_channel, Ok(payload)).is_err() {
 							// Note: Failure is handled further below when receiving
 							// `InboundFailure` event from request-response [`Behaviour`].
 							log::debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Failed to send response for {:?} on protocol {:?} due to a \
 								 timeout or due to the connection to the peer being closed. \
 								 Dropping response",
@@ -730,7 +733,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 						ToSwarm::Dial { opts } => {
 							if opts.get_peer_id().is_none() {
 								log::error!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"The request-response isn't supposed to start dialing addresses"
 								);
 							}
@@ -762,7 +765,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 
 							if reputation < BANNED_THRESHOLD {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Cannot handle requests from a node with a low reputation {}: {}",
 									peer,
 									reputation,
@@ -828,7 +831,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 									..
 								}) => {
 									log::trace!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"received response from {peer} ({protocol:?}), {} bytes",
 										response.as_ref().map_or(0usize, |response| response.len()),
 									);
@@ -844,7 +847,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								},
 								_ => {
 									log::debug!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"Received `RequestResponseEvent::Message` with unexpected request id {:?} from {:?}",
 										request_id,
 										peer,
@@ -887,7 +890,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 											fallback_request
 										{
 											log::trace!(
-												target: "sub-libp2p",
+												target: LOG_TARGET,
 												"Request with id {:?} failed. Trying the fallback protocol. {}",
 												request_id,
 												fallback_protocol.deref()
@@ -907,7 +910,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 										.is_err()
 									{
 										log::debug!(
-											target: "sub-libp2p",
+											target: LOG_TARGET,
 											"Request with id {:?} failed. At the same time local \
 											 node is no longer interested in the result.",
 											request_id,
@@ -917,7 +920,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								},
 								_ => {
 									log::debug!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"Received `RequestResponseEvent::OutboundFailure` with unexpected request id {:?} error {:?} from {:?}",
 										request_id,
 										error,
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index b4463ad480891eafe40199d9ff7c5ef23a111681..3f6ff7c5f6dfa75b73d4a55e0e61c3f452cacb63 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -110,6 +110,9 @@ pub(crate) mod out_events;
 pub mod signature;
 pub mod traits;
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
 struct Libp2pBandwidthSink {
 	#[allow(deprecated)]
 	sink: Arc<transport::BandwidthSinks>,
@@ -287,7 +290,7 @@ where
 			.filter(|reserved_node| {
 				if reserved_node.peer_id == local_peer_id.into() {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Local peer ID used in reserved node, ignoring: {}",
 						reserved_node,
 					);
@@ -329,11 +332,11 @@ where
 		}
 
 		info!(
-			target: "sub-libp2p",
+			target: LOG_TARGET,
 			"🏷  Local node identity is: {}",
 			local_peer_id.to_base58(),
 		);
-		info!(target: "sub-libp2p", "Running libp2p network backend");
+		info!(target: LOG_TARGET, "Running libp2p network backend");
 
 		let (transport, bandwidth) = {
 			let config_mem = match network_config.transport {
@@ -569,7 +572,7 @@ where
 		// Listen on multiaddresses.
 		for addr in &network_config.listen_addresses {
 			if let Err(err) = Swarm::<Behaviour<B>>::listen_on(&mut swarm, addr.clone().into()) {
-				warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err)
+				warn!(target: LOG_TARGET, "Can't listen on {} because: {:?}", addr, err)
 			}
 		}
 
@@ -681,7 +684,7 @@ where
 						) {
 						addrs.into_iter().collect()
 					} else {
-						error!(target: "sub-libp2p", "Was not able to get known addresses for {:?}", peer_id);
+						error!(target: LOG_TARGET, "Was not able to get known addresses for {:?}", peer_id);
 						return None
 					};
 
@@ -690,7 +693,7 @@ where
 					{
 						e.clone().into()
 					} else {
-						error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \
+						error!(target: LOG_TARGET, "Found state inconsistency between custom protocol \
 						and debug information about {:?}", peer_id);
 						return None
 					};
@@ -732,7 +735,7 @@ where
 						) {
 						addrs.into_iter().collect()
 					} else {
-						error!(target: "sub-libp2p", "Was not able to get known addresses for {:?}", peer_id);
+						error!(target: LOG_TARGET, "Was not able to get known addresses for {:?}", peer_id);
 						Default::default()
 					};
 
@@ -1145,7 +1148,7 @@ where
 		match Roles::decode_all(&mut &handshake[..]) {
 			Ok(role) => Some(role.into()),
 			Err(_) => {
-				log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}");
+				log::debug!(target: LOG_TARGET, "handshake doesn't contain peer role: {handshake:?}");
 				self.peer_store_handle.peer_role(&(peer_id.into()))
 			},
 		}
@@ -1278,11 +1281,11 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> {
 		}
 
 		trace!(
-			target: "sub-libp2p",
+			target: LOG_TARGET,
 			"External API => Notification({:?}, {}, {} bytes)",
 			self.peer_id, self.protocol_name, notification.len(),
 		);
-		trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id);
+		trace!(target: LOG_TARGET, "Handler({:?}) <= Async notification", self.peer_id);
 
 		self.ready
 			.take()
@@ -1570,7 +1573,7 @@ where
 			}) => {
 				if listen_addrs.len() > 30 {
 					debug!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}",
 						peer_id, protocol_version, agent_version
 					);
@@ -1684,9 +1687,9 @@ where
 				..
 			} => {
 				if let Some(errors) = concurrent_dial_errors {
-					debug!(target: "sub-libp2p", "Libp2p => Connected({:?}) with errors: {:?}", peer_id, errors);
+					debug!(target: LOG_TARGET, "Libp2p => Connected({:?}) with errors: {:?}", peer_id, errors);
 				} else {
-					debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id);
+					debug!(target: LOG_TARGET, "Libp2p => Connected({:?})", peer_id);
 				}
 
 				if let Some(metrics) = self.metrics.as_ref() {
@@ -1708,7 +1711,7 @@ where
 				endpoint,
 				num_established,
 			} => {
-				debug!(target: "sub-libp2p", "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})");
+				debug!(target: LOG_TARGET, "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})");
 				if let Some(metrics) = self.metrics.as_ref() {
 					let direction = match endpoint {
 						ConnectedPoint::Dialer { .. } => "out",
@@ -1728,14 +1731,14 @@ where
 				}
 			},
 			SwarmEvent::NewListenAddr { address, .. } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", address);
+				trace!(target: LOG_TARGET, "Libp2p => NewListenAddr({})", address);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_local_addresses.inc();
 				}
 				self.listen_addresses.lock().insert(address.clone());
 			},
 			SwarmEvent::ExpiredListenAddr { address, .. } => {
-				info!(target: "sub-libp2p", "📪 No longer listening on {}", address);
+				info!(target: LOG_TARGET, "📪 No longer listening on {}", address);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_local_addresses.dec();
 				}
@@ -1744,7 +1747,7 @@ where
 			SwarmEvent::OutgoingConnectionError { connection_id, peer_id, error } => {
 				if let Some(peer_id) = peer_id {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Libp2p => Failed to reach {peer_id:?} via {connection_id:?}: {error}",
 					);
 
@@ -1800,10 +1803,10 @@ where
 				}
 			},
 			SwarmEvent::Dialing { connection_id, peer_id } => {
-				trace!(target: "sub-libp2p", "Libp2p => Dialing({peer_id:?}) via {connection_id:?}")
+				trace!(target: LOG_TARGET, "Libp2p => Dialing({peer_id:?}) via {connection_id:?}")
 			},
 			SwarmEvent::IncomingConnection { connection_id, local_addr, send_back_addr } => {
-				trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))");
+				trace!(target: LOG_TARGET, "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))");
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.incoming_connections_total.inc();
 				}
@@ -1815,7 +1818,7 @@ where
 				error,
 			} => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}"
 				);
 				if let Some(metrics) = self.metrics.as_ref() {
@@ -1854,37 +1857,37 @@ where
 					addresses.into_iter().map(|a| a.to_string()).collect::<Vec<_>>().join(", ");
 				match reason {
 					Ok(()) => error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"📪 Libp2p listener ({}) closed gracefully",
 						addrs
 					),
 					Err(e) => error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"📪 Libp2p listener ({}) closed: {}",
 						addrs, e
 					),
 				}
 			},
 			SwarmEvent::ListenerError { error, .. } => {
-				debug!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error);
+				debug!(target: LOG_TARGET, "Libp2p => ListenerError: {}", error);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_errors_total.inc();
 				}
 			},
 			SwarmEvent::NewExternalAddrCandidate { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrCandidate: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => NewExternalAddrCandidate: {address:?}");
 			},
 			SwarmEvent::ExternalAddrConfirmed { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => ExternalAddrConfirmed: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => ExternalAddrConfirmed: {address:?}");
 			},
 			SwarmEvent::ExternalAddrExpired { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => ExternalAddrExpired: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => ExternalAddrExpired: {address:?}");
 			},
 			SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}")
+				trace!(target: LOG_TARGET, "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}")
 			},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown SwarmEvent libp2p event: {event:?}");
+				warn!(target: LOG_TARGET, "New unknown SwarmEvent libp2p event: {event:?}");
 			},
 		}
 	}
diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs
index be65f49e6e4ea406cc742f153009fb8d22ca4fd5..234fd59ef89fdc8ec602f69ab483fd30782efc2d 100644
--- a/substrate/frame/alliance/src/lib.rs
+++ b/substrate/frame/alliance/src/lib.rs
@@ -204,7 +204,17 @@ pub enum MemberRole {
 }
 
 /// The type of item that may be deemed unscrupulous.
-#[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum UnscrupulousItem<AccountId, Url> {
 	AccountId(AccountId),
 	Website(Url),
diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs
index 069c29a88d38c1ad1dad483d78e931df0cce90db..b2b4ba4d5bf91aa87caa32d05f2b288704e67e64 100644
--- a/substrate/frame/alliance/src/mock.rs
+++ b/substrate/frame/alliance/src/mock.rs
@@ -126,7 +126,7 @@ impl pallet_identity::Config for Test {
 	type WeightInfo = ();
 }
 
-#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, TypeInfo)]
+#[derive(Clone, Debug, Encode, Decode, DecodeWithMemTracking, PartialEq, Eq, TypeInfo)]
 pub struct AccountU64(u64);
 impl IdentifyAccount for AccountU64 {
 	type AccountId = u64;
diff --git a/substrate/frame/alliance/src/types.rs b/substrate/frame/alliance/src/types.rs
index 75b949c19b3256d0276679662fb8b1fc8a81acc1..2c7112652d664d5e332fcaaa06a5f25cee4d040a 100644
--- a/substrate/frame/alliance/src/types.rs
+++ b/substrate/frame/alliance/src/types.rs
@@ -16,14 +16,24 @@
 // limitations under the License.
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_support::{traits::ConstU32, BoundedVec};
 use scale_info::TypeInfo;
 use sp_runtime::RuntimeDebug;
 
 /// A Multihash instance that only supports the basic functionality and no hashing.
 #[derive(
-	Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen,
+	Clone,
+	PartialEq,
+	Eq,
+	PartialOrd,
+	Ord,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
 )]
 pub struct Multihash {
 	/// The code of the Multihash.
@@ -50,6 +60,7 @@ impl Multihash {
 	RuntimeDebug,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	TypeInfo,
 	MaxEncodedLen,
 )]
@@ -64,7 +75,17 @@ pub enum Version {
 ///
 /// The generic is about the allocated size of the multihash.
 #[derive(
-	Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen,
+	Clone,
+	PartialEq,
+	Eq,
+	PartialOrd,
+	Ord,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
 )]
 pub struct Cid {
 	/// The version of CID.
@@ -96,7 +117,17 @@ impl Cid {
 
 /// Witness data for the `disband` call.
 #[derive(
-	Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo, Default,
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+	Default,
 )]
 pub struct DisbandWitness {
 	/// Total number of fellow members in the current Alliance.
diff --git a/substrate/frame/asset-conversion/src/types.rs b/substrate/frame/asset-conversion/src/types.rs
index 27c0e8e68805ea7716ed7371aeed532be906fa47..1fc989e71675dac378a9b8d9dfd0ffbb759003e3 100644
--- a/substrate/frame/asset-conversion/src/types.rs
+++ b/substrate/frame/asset-conversion/src/types.rs
@@ -29,7 +29,7 @@ use sp_runtime::traits::TryConvert;
 /// 1. `asset(asset1, amount_in)` take from `user` and move to the pool(asset1, asset2);
 /// 2. `asset(asset2, amount_out2)` transfer from pool(asset1, asset2) to pool(asset2, asset3);
 /// 3. `asset(asset3, amount_out3)` move from pool(asset2, asset3) to `user`.
-pub(super) type BalancePath<T> = Vec<(<T as Config>::AssetKind, <T as Config>::Balance)>;
+pub type BalancePath<T> = Vec<(<T as Config>::AssetKind, <T as Config>::Balance)>;
 
 /// Credit of [Config::Assets].
 pub type CreditOf<T> = Credit<<T as frame_system::Config>::AccountId, <T as Config>::Assets>;
diff --git a/substrate/frame/assets-freezer/src/lib.rs b/substrate/frame/assets-freezer/src/lib.rs
index 61a695a6f5b8111b7f9eb06927655c84e2ef1713..e298658f16dbc5e0c004b773f4aed9e08acc36c5 100644
--- a/substrate/frame/assets-freezer/src/lib.rs
+++ b/substrate/frame/assets-freezer/src/lib.rs
@@ -105,7 +105,7 @@ pub mod pallet {
 
 	/// A map that stores freezes applied on an account for a given AssetId.
 	#[pallet::storage]
-	pub(super) type Freezes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Freezes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -120,7 +120,7 @@ pub mod pallet {
 
 	/// A map that stores the current total frozen balance for every account on a given AssetId.
 	#[pallet::storage]
-	pub(super) type FrozenBalances<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type FrozenBalances<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 9ea346c4cf3fde31c26fa5e940618ce473b693ee..6e946d610e07365f9225f339c80fd9d7fa5d9ecf 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -419,7 +419,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Details of an asset.
-	pub(super) type Asset<T: Config<I>, I: 'static = ()> = StorageMap<
+	pub type Asset<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -428,7 +428,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// The holdings of a specific account for a specific asset.
-	pub(super) type Account<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Account<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -441,7 +441,7 @@ pub mod pallet {
 	/// Approved balance transfers. First balance is the amount approved for transfer. Second
 	/// is the amount of `T::Currency` reserved for storing this.
 	/// First key is the asset ID, second key is the owner and third key is the delegate.
-	pub(super) type Approvals<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Approvals<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::AssetId>,
@@ -453,7 +453,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Metadata of an asset.
-	pub(super) type Metadata<T: Config<I>, I: 'static = ()> = StorageMap<
+	pub type Metadata<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
diff --git a/substrate/frame/assets/src/types.rs b/substrate/frame/assets/src/types.rs
index 9a60a13f5a71c993d460666f860d8e64731267a3..baa530565bceae25ba7f8fbb39b9456a963e8cd2 100644
--- a/substrate/frame/assets/src/types.rs
+++ b/substrate/frame/assets/src/types.rs
@@ -24,21 +24,21 @@ use frame_support::{
 };
 use sp_runtime::{traits::Convert, FixedPointNumber, FixedU128};
 
-pub(super) type DepositBalanceOf<T, I = ()> =
+pub type DepositBalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
-pub(super) type AssetAccountOf<T, I> = AssetAccount<
+pub type AssetAccountOf<T, I> = AssetAccount<
 	<T as Config<I>>::Balance,
 	DepositBalanceOf<T, I>,
 	<T as Config<I>>::Extra,
 	<T as SystemConfig>::AccountId,
 >;
-pub(super) type ExistenceReasonOf<T, I> =
+pub type ExistenceReasonOf<T, I> =
 	ExistenceReason<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>;
 
 /// AssetStatus holds the current state of the asset. It could either be Live and available for use,
 /// or in a Destroying state.
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
-pub(super) enum AssetStatus {
+pub enum AssetStatus {
 	/// The asset is active and able to be used.
 	Live,
 	/// Whether the asset is frozen for non-admin transfers.
@@ -51,30 +51,30 @@ pub(super) enum AssetStatus {
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
 pub struct AssetDetails<Balance, AccountId, DepositBalance> {
 	/// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
-	pub(super) owner: AccountId,
+	pub owner: AccountId,
 	/// Can mint tokens.
-	pub(super) issuer: AccountId,
+	pub issuer: AccountId,
 	/// Can thaw tokens, force transfers and burn tokens from any account.
-	pub(super) admin: AccountId,
+	pub admin: AccountId,
 	/// Can freeze tokens.
-	pub(super) freezer: AccountId,
+	pub freezer: AccountId,
 	/// The total supply across all accounts.
-	pub(super) supply: Balance,
+	pub supply: Balance,
 	/// The balance deposited for this asset. This pays for the data stored here.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 	/// The ED for virtual accounts.
-	pub(super) min_balance: Balance,
+	pub min_balance: Balance,
 	/// If `true`, then any account with this asset is given a provider reference. Otherwise, it
 	/// requires a consumer reference.
-	pub(super) is_sufficient: bool,
+	pub is_sufficient: bool,
 	/// The total number of accounts.
-	pub(super) accounts: u32,
+	pub accounts: u32,
 	/// The total number of accounts for which we have placed a self-sufficient reference.
-	pub(super) sufficients: u32,
+	pub sufficients: u32,
 	/// The total number of approvals.
-	pub(super) approvals: u32,
+	pub approvals: u32,
 	/// The status of the asset
-	pub(super) status: AssetStatus,
+	pub status: AssetStatus,
 }
 
 /// Data concerning an approval.
@@ -82,9 +82,9 @@ pub struct AssetDetails<Balance, AccountId, DepositBalance> {
 pub struct Approval<Balance, DepositBalance> {
 	/// The amount of funds approved for the balance transfer from the owner to some delegated
 	/// target.
-	pub(super) amount: Balance,
+	pub amount: Balance,
 	/// The amount reserved on the owner's account to hold this item in storage.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 }
 
 #[test]
@@ -118,7 +118,7 @@ impl<Balance, AccountId> ExistenceReason<Balance, AccountId>
 where
 	AccountId: Clone,
 {
-	pub(crate) fn take_deposit(&mut self) -> Option<Balance> {
+	pub fn take_deposit(&mut self) -> Option<Balance> {
 		if !matches!(self, ExistenceReason::DepositHeld(_)) {
 			return None
 		}
@@ -131,7 +131,7 @@ where
 		}
 	}
 
-	pub(crate) fn take_deposit_from(&mut self) -> Option<(AccountId, Balance)> {
+	pub fn take_deposit_from(&mut self) -> Option<(AccountId, Balance)> {
 		if !matches!(self, ExistenceReason::DepositFrom(..)) {
 			return None
 		}
@@ -163,11 +163,11 @@ pub enum AccountStatus {
 }
 impl AccountStatus {
 	/// Returns `true` if frozen or blocked.
-	pub(crate) fn is_frozen(&self) -> bool {
+	pub fn is_frozen(&self) -> bool {
 		matches!(self, AccountStatus::Frozen | AccountStatus::Blocked)
 	}
 	/// Returns `true` if blocked.
-	pub(crate) fn is_blocked(&self) -> bool {
+	pub fn is_blocked(&self) -> bool {
 		matches!(self, AccountStatus::Blocked)
 	}
 }
@@ -178,13 +178,13 @@ pub struct AssetAccount<Balance, DepositBalance, Extra, AccountId> {
 	///
 	/// The part of the `balance` may be frozen by the [`Config::Freezer`]. The on-hold portion is
 	/// not included here and is tracked by the [`Config::Holder`].
-	pub(super) balance: Balance,
+	pub balance: Balance,
 	/// The status of the account.
-	pub(super) status: AccountStatus,
+	pub status: AccountStatus,
 	/// The reason for the existence of the account.
-	pub(super) reason: ExistenceReason<DepositBalance, AccountId>,
+	pub reason: ExistenceReason<DepositBalance, AccountId>,
 	/// Additional "sidecar" data, in case some other pallet wants to use this storage item.
-	pub(super) extra: Extra,
+	pub extra: Extra,
 }
 
 #[derive(Clone, Encode, Decode, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)]
@@ -192,15 +192,15 @@ pub struct AssetMetadata<DepositBalance, BoundedString> {
 	/// The balance deposited for this metadata.
 	///
 	/// This pays for the data stored in this struct.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 	/// The user friendly name of this asset. Limited in length by `StringLimit`.
-	pub(super) name: BoundedString,
+	pub name: BoundedString,
 	/// The ticker symbol for this asset. Limited in length by `StringLimit`.
-	pub(super) symbol: BoundedString,
+	pub symbol: BoundedString,
 	/// The number of decimals this asset uses to represent one unit.
-	pub(super) decimals: u8,
+	pub decimals: u8,
 	/// Whether the asset metadata may be changed by a non Force origin.
-	pub(super) is_frozen: bool,
+	pub is_frozen: bool,
 }
 
 /// Trait for allowing a minimum balance on the account to be specified, beyond the
@@ -275,28 +275,28 @@ impl<AssetId, AccountId, Balance> BalanceOnHold<AssetId, AccountId, Balance> for
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
-pub(super) struct TransferFlags {
+pub struct TransferFlags {
 	/// The debited account must stay alive at the end of the operation; an error is returned if
 	/// this cannot be achieved legally.
-	pub(super) keep_alive: bool,
+	pub keep_alive: bool,
 	/// Less than the amount specified needs be debited by the operation for it to be considered
 	/// successful. If `false`, then the amount debited will always be at least the amount
 	/// specified.
-	pub(super) best_effort: bool,
+	pub best_effort: bool,
 	/// Any additional funds debited (due to minimum balance requirements) should be burned rather
 	/// than credited to the destination account.
-	pub(super) burn_dust: bool,
+	pub burn_dust: bool,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
-pub(super) struct DebitFlags {
+pub struct DebitFlags {
 	/// The debited account must stay alive at the end of the operation; an error is returned if
 	/// this cannot be achieved legally.
-	pub(super) keep_alive: bool,
+	pub keep_alive: bool,
 	/// Less than the amount specified needs be debited by the operation for it to be considered
 	/// successful. If `false`, then the amount debited will always be at least the amount
 	/// specified.
-	pub(super) best_effort: bool,
+	pub best_effort: bool,
 }
 
 impl From<TransferFlags> for DebitFlags {
diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs
index 9521f20fe0092c30c946c0b91c32e3c70c83404b..f35f0efa618abbb5db4812e4e2ca501a4c24de7e 100644
--- a/substrate/frame/atomic-swap/src/lib.rs
+++ b/substrate/frame/atomic-swap/src/lib.rs
@@ -45,7 +45,7 @@ mod tests;
 extern crate alloc;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use core::{
 	marker::PhantomData,
 	ops::{Deref, DerefMut},
@@ -57,7 +57,17 @@ use frame::{
 use scale_info::TypeInfo;
 
 /// Pending atomic swap operation.
-#[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Eq,
+	PartialEq,
+	RuntimeDebugNoBound,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[scale_info(skip_type_params(T))]
 #[codec(mel_bound())]
 pub struct PendingSwap<T: Config> {
@@ -92,7 +102,17 @@ pub trait SwapAction<AccountId, T: Config> {
 }
 
 /// A swap action that only allows transferring balances.
-#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	RuntimeDebug,
+	Eq,
+	PartialEq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[scale_info(skip_type_params(C))]
 #[codec(mel_bound())]
 pub struct BalanceSwapAction<AccountId, C: ReservableCurrency<AccountId>> {
diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs
index 220b39292b57598cdc1def77baef6874d7165c95..6883a07ad2891b216c988bb9b6f67cdb76fe6464 100644
--- a/substrate/frame/authority-discovery/src/lib.rs
+++ b/substrate/frame/authority-discovery/src/lib.rs
@@ -51,12 +51,12 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Keys of the current authority set.
-	pub(super) type Keys<T: Config> =
+	pub type Keys<T: Config> =
 		StorageValue<_, WeakBoundedVec<AuthorityId, T::MaxAuthorities>, ValueQuery>;
 
 	#[pallet::storage]
 	/// Keys of the next authority set.
-	pub(super) type NextKeys<T: Config> =
+	pub type NextKeys<T: Config> =
 		StorageValue<_, WeakBoundedVec<AuthorityId, T::MaxAuthorities>, ValueQuery>;
 
 	#[derive(frame_support::DefaultNoBound)]
@@ -210,6 +210,7 @@ mod tests {
 		type ValidatorId = AuthorityId;
 		type ValidatorIdOf = ConvertInto;
 		type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+		type DisablingStrategy = ();
 		type WeightInfo = ();
 	}
 
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index 1e4f51d5143098cd146fed6f35743482a6cfb1f1..ea977a547fee80ba6545c53221e9535d37ffbf16 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -100,12 +100,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs
index ae65cc0783c93aa58330bb75298d819b6b81d7b5..606b07b6e7b6f302b013cf2a9a9ec8738208e0b9 100644
--- a/substrate/frame/bags-list/src/lib.rs
+++ b/substrate/frame/bags-list/src/lib.rs
@@ -253,14 +253,14 @@ pub mod pallet {
 	///
 	/// Nodes store links forward and back within their respective bags.
 	#[pallet::storage]
-	pub(crate) type ListNodes<T: Config<I>, I: 'static = ()> =
+	pub type ListNodes<T: Config<I>, I: 'static = ()> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, list::Node<T, I>>;
 
 	/// A bag stored in storage.
 	///
 	/// Stores a `Bag` struct, which stores head and tail pointers to itself.
 	#[pallet::storage]
-	pub(crate) type ListBags<T: Config<I>, I: 'static = ()> =
+	pub type ListBags<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::Score, list::Bag<T, I>>;
 
 	#[pallet::event]
@@ -273,7 +273,6 @@ pub mod pallet {
 	}
 
 	#[pallet::error]
-	#[cfg_attr(test, derive(PartialEq))]
 	pub enum Error<T, I = ()> {
 		/// A error in the list interface implementation.
 		List(ListError),
diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs
index 6b0d1afcd8b2891a39fad53f91562e455c7d01a0..1fe4ffffaa65804fe03cdaf3296dbf92ca680fd3 100644
--- a/substrate/frame/bags-list/src/list/mod.rs
+++ b/substrate/frame/bags-list/src/list/mod.rs
@@ -29,13 +29,13 @@ use alloc::{
 	boxed::Box,
 	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
 };
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{iter, marker::PhantomData};
 use frame_election_provider_support::ScoreProvider;
 use frame_support::{
 	defensive, ensure,
 	traits::{Defensive, DefensiveOption, Get},
-	DefaultNoBound, PalletError,
+	CloneNoBound, DefaultNoBound, EqNoBound, PalletError, PartialEqNoBound, RuntimeDebugNoBound,
 };
 use scale_info::TypeInfo;
 use sp_runtime::traits::{Bounded, Zero};
@@ -51,7 +51,17 @@ use alloc::vec::Vec;
 #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))]
 use sp_runtime::TryRuntimeError;
 
-#[derive(Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, PalletError)]
+#[derive(
+	Debug,
+	PartialEq,
+	Eq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	PalletError,
+)]
 pub enum ListError {
 	/// A duplicate id has been detected.
 	Duplicate,
@@ -622,18 +632,27 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 /// desirable to ensure that there is some element of first-come, first-serve to the list's
 /// iteration so that there's no incentive to churn ids positioning to improve the chances of
 /// appearing within the ids set.
-#[derive(DefaultNoBound, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	DefaultNoBound,
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound())]
 #[scale_info(skip_type_params(T, I))]
-#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))]
 pub struct Bag<T: Config<I>, I: 'static = ()> {
-	head: Option<T::AccountId>,
-	tail: Option<T::AccountId>,
+	pub head: Option<T::AccountId>,
+	pub tail: Option<T::AccountId>,
 
 	#[codec(skip)]
-	bag_upper: T::Score,
+	pub bag_upper: T::Score,
 	#[codec(skip)]
-	_phantom: PhantomData<I>,
+	pub _phantom: PhantomData<I>,
 }
 
 impl<T: Config<I>, I: 'static> Bag<T, I> {
@@ -822,18 +841,26 @@ impl<T: Config<I>, I: 'static> Bag<T, I> {
 }
 
 /// A Node is the fundamental element comprising the doubly-linked list described by `Bag`.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+	RuntimeDebugNoBound,
+)]
 #[codec(mel_bound())]
 #[scale_info(skip_type_params(T, I))]
-#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))]
 pub struct Node<T: Config<I>, I: 'static = ()> {
-	pub(crate) id: T::AccountId,
-	pub(crate) prev: Option<T::AccountId>,
-	pub(crate) next: Option<T::AccountId>,
-	pub(crate) bag_upper: T::Score,
-	pub(crate) score: T::Score,
+	pub id: T::AccountId,
+	pub prev: Option<T::AccountId>,
+	pub next: Option<T::AccountId>,
+	pub bag_upper: T::Score,
+	pub score: T::Score,
 	#[codec(skip)]
-	pub(crate) _phantom: PhantomData<I>,
+	pub _phantom: PhantomData<I>,
 }
 
 impl<T: Config<I>, I: 'static> Node<T, I> {
diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs
index 917b7507d7c941156a080163bf029f468c75b097..7f39274789118c688526604d7bd58f6af121f8dd 100644
--- a/substrate/frame/balances/src/types.rs
+++ b/substrate/frame/balances/src/types.rs
@@ -18,7 +18,7 @@
 //! Types used in the pallet.
 
 use crate::{Config, CreditOf, Event, Pallet};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::BitOr;
 use frame_support::traits::{Imbalance, LockIdentifier, OnUnbalanced, WithdrawReasons};
 use scale_info::TypeInfo;
@@ -145,7 +145,17 @@ impl<T: Config<I>, I: 'static> Drop for DustCleaner<T, I> {
 }
 
 /// Whether something should be interpreted as an increase or a decrease.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+)]
 pub enum AdjustmentDirection {
 	/// Increase the amount.
 	Increase,
diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs
index 6756c618d706d2b0b6bc79560df954c93cdb54ea..aa6905306cd6a9b22d717dacbb53c998b3e70774 100644
--- a/substrate/frame/beefy-mmr/src/mock.rs
+++ b/substrate/frame/beefy-mmr/src/mock.rs
@@ -72,6 +72,7 @@ impl pallet_session::Config for Test {
 	type SessionManager = MockSessionManager;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 2f90edf3c358a65249bc1d3b053f458fd75e0bfa..275bf18fe873d7278f490b41832bae0664276a0a 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use frame_election_provider_support::{
 	bounds::{ElectionBounds, ElectionBoundsBuilder},
 	onchain, SequentialPhragmen, Weight,
@@ -95,7 +95,7 @@ pub struct MockAncestryProofContext {
 	pub is_valid: bool,
 }
 
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo)]
 pub struct MockAncestryProof {
 	pub is_optimal: bool,
 	pub is_non_canonical: bool,
@@ -184,12 +184,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/broker/src/core_mask.rs b/substrate/frame/broker/src/core_mask.rs
index b8d045077d8285c1b7998e109733d25f4f946d5d..507bc0897bd21f9a955a04605fef2780db86f454 100644
--- a/substrate/frame/broker/src/core_mask.rs
+++ b/substrate/frame/broker/src/core_mask.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
 use scale_info::TypeInfo;
 use sp_core::RuntimeDebug;
@@ -25,7 +25,17 @@ pub const CORE_MASK_BITS: usize = 80;
 
 // TODO: Use BitArr instead; for this, we'll need to ensure Codec is impl'ed for `BitArr`.
 #[derive(
-	Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Default,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
 )]
 pub struct CoreMask([u8; 10]);
 impl CoreMask {
diff --git a/substrate/frame/broker/src/coretime_interface.rs b/substrate/frame/broker/src/coretime_interface.rs
index 9c18e2c4ff0b33ce2357f8c1ba6860f2d56efc49..618c3d70fc561650ca4c139ba875bac60e4e2bef 100644
--- a/substrate/frame/broker/src/coretime_interface.rs
+++ b/substrate/frame/broker/src/coretime_interface.rs
@@ -18,7 +18,7 @@
 #![deny(missing_docs)]
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::fmt::Debug;
 use frame_support::Parameter;
 use scale_info::TypeInfo;
@@ -39,7 +39,17 @@ pub type PartsOf57600 = u16;
 
 /// An element to which a core can be assigned.
 #[derive(
-	Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, MaxEncodedLen,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	Eq,
+	PartialEq,
+	Ord,
+	PartialOrd,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
 )]
 pub enum CoreAssignment {
 	/// Core need not be used for anything.
diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs
index f970b310a3cba1dd987c85bdd0a72f0181ccce1c..ec04b9d9b1f6246f10cef19a7764ab909f5b5e05 100644
--- a/substrate/frame/broker/src/types.rs
+++ b/substrate/frame/broker/src/types.rs
@@ -19,7 +19,7 @@ use crate::{
 	Config, CoreAssignment, CoreIndex, CoreMask, CoretimeInterface, RCBlockNumberOf, TaskId,
 	CORE_MASK_BITS,
 };
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_support::traits::fungible::Inspect;
 use frame_system::Config as SConfig;
 use scale_info::TypeInfo;
@@ -42,7 +42,18 @@ pub type CoreMaskBitCount = u32;
 pub type SignedCoreMaskBitCount = i32;
 
 /// Whether a core assignment is revokable or not.
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum Finality {
 	/// The region remains with the same owner allowing the assignment to be altered.
 	Provisional,
@@ -51,7 +62,18 @@ pub enum Finality {
 }
 
 /// Self-describing identity for a Region of Bulk Coretime.
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct RegionId {
 	/// The timeslice at which this Region begins.
 	pub begin: Timeslice,
@@ -91,7 +113,17 @@ pub struct RegionRecord<AccountId, Balance> {
 pub type RegionRecordOf<T> = RegionRecord<<T as SConfig>::AccountId, BalanceOf<T>>;
 
 /// An distinct item which can be scheduled on a Polkadot Core.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct ScheduleItem {
 	/// The regularity parts in which this Item will be scheduled on the Core.
 	pub mask: CoreMask,
@@ -259,7 +291,17 @@ pub type LeasesRecordOf<T> = LeasesRecord<<T as Config>::MaxLeasedCores>;
 ///
 /// The blocknumber is the relay chain block height `until` which the original request
 /// for revenue was made.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct OnDemandRevenueRecord<RelayBlockNumber, RelayBalance> {
 	/// The height of the Relay-chain at the time the revenue request was made.
 	pub until: RelayBlockNumber,
@@ -271,7 +313,17 @@ pub type OnDemandRevenueRecordOf<T> =
 	OnDemandRevenueRecord<RelayBlockNumberOf<T>, RelayBalanceOf<T>>;
 
 /// Configuration of this pallet.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct ConfigRecord<RelayBlockNumber> {
 	/// The number of Relay-chain blocks in advance which scheduling should be fixed and the
 	/// `Coretime::assign` API used to inform the Relay-chain.
diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs
index 8e533a7b290432b17eed5f142e9f234f3d1d9d5c..a7bd00a7f4114f067ab1233b9706ace760b12edc 100644
--- a/substrate/frame/collective/src/lib.rs
+++ b/substrate/frame/collective/src/lib.rs
@@ -44,7 +44,7 @@
 extern crate alloc;
 
 use alloc::{boxed::Box, vec, vec::Vec};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{marker::PhantomData, result};
 use scale_info::TypeInfo;
 use sp_io::storage;
@@ -137,7 +137,17 @@ impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote {
 }
 
 /// Origin for the collective module.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[scale_info(skip_type_params(I))]
 #[codec(mel_bound(AccountId: MaxEncodedLen))]
 pub enum RawOrigin<AccountId, I> {
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index 7bb5b46cf527f24dc89d2d17d796a77bcefea4a5..627aca17c9c94dc34dd82f50fb2fbcaea21a29ef 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -116,7 +116,7 @@ use crate::{
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
 	wasm::{CodeInfo, RuntimeCosts, WasmBlob},
 };
-use codec::{Codec, Decode, Encode, HasCompact, MaxEncodedLen};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode, HasCompact, MaxEncodedLen};
 use core::fmt::Debug;
 use environmental::*;
 use frame_support::{
@@ -1382,7 +1382,9 @@ pub mod pallet {
 }
 
 /// The type of origins supported by the contracts pallet.
-#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebugNoBound)]
+#[derive(
+	Clone, Encode, Decode, DecodeWithMemTracking, PartialEq, TypeInfo, RuntimeDebugNoBound,
+)]
 pub enum Origin<T: Config> {
 	Root,
 	Signed(T::AccountId),
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index c9786fa1516b52e58dfc5aaed7b2f100cead576f..399ed1ccded75465d01ff66a20714de4e7f9bed5 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -49,7 +49,7 @@ use crate::{
 	HoldReason, Pallet, PristineCode, Schedule, Weight, LOG_TARGET,
 };
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_support::{
 	dispatch::DispatchResult,
 	ensure,
@@ -106,7 +106,16 @@ pub struct CodeInfo<T: Config> {
 
 /// Defines the required determinism level of a wasm blob when either running or uploading code.
 #[derive(
-	Clone, Copy, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen, RuntimeDebug, PartialEq, Eq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	scale_info::TypeInfo,
+	MaxEncodedLen,
+	RuntimeDebug,
+	PartialEq,
+	Eq,
 )]
 pub enum Determinism {
 	/// The execution should be deterministic and hence no indeterministic instructions are
diff --git a/substrate/frame/conviction-voting/src/conviction.rs b/substrate/frame/conviction-voting/src/conviction.rs
index b5c9a3a705f6b306a6f1d596d1e58f427ad4e23e..4332c8e4a0a7c6263c44bb654825f5bb47ce6e58 100644
--- a/substrate/frame/conviction-voting/src/conviction.rs
+++ b/substrate/frame/conviction-voting/src/conviction.rs
@@ -17,7 +17,7 @@
 
 //! The conviction datatype.
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{Bounded, CheckedDiv, CheckedMul, Zero},
@@ -30,6 +30,7 @@ use crate::types::Delegations;
 #[derive(
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	Copy,
 	Clone,
 	Eq,
diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs
index 3dd2ad24298d344d4122933546406fd3dc8c9fb9..fda97281f16bd64f941d9314a83032f02f22b173 100644
--- a/substrate/frame/conviction-voting/src/lib.rs
+++ b/substrate/frame/conviction-voting/src/lib.rs
@@ -68,9 +68,9 @@ pub type BlockNumberFor<T, I> =
 	<<T as Config<I>>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
 
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
-type BalanceOf<T, I = ()> =
+pub type BalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type VotingOf<T, I = ()> = Voting<
+pub type VotingOf<T, I = ()> = Voting<
 	BalanceOf<T, I>,
 	<T as frame_system::Config>::AccountId,
 	BlockNumberFor<T, I>,
@@ -82,10 +82,10 @@ type DelegatingOf<T, I = ()> =
 	Delegating<BalanceOf<T, I>, <T as frame_system::Config>::AccountId, BlockNumberFor<T, I>>;
 pub type TallyOf<T, I = ()> = Tally<BalanceOf<T, I>, <T as Config<I>>::MaxTurnout>;
 pub type VotesOf<T, I = ()> = BalanceOf<T, I>;
-type PollIndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
+pub type PollIndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
 #[cfg(feature = "runtime-benchmarks")]
-type IndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
-type ClassOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Class;
+pub type IndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
+pub type ClassOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Class;
 
 #[frame_support::pallet]
 pub mod pallet {
diff --git a/substrate/frame/conviction-voting/src/vote.rs b/substrate/frame/conviction-voting/src/vote.rs
index 1c5b742ba12b16714b7c886d94b374c8d2dde5a3..84c7dafd52c4ad5c92338580fcfd410525a339c2 100644
--- a/substrate/frame/conviction-voting/src/vote.rs
+++ b/substrate/frame/conviction-voting/src/vote.rs
@@ -18,7 +18,7 @@
 //! The vote datatype.
 
 use crate::{Conviction, Delegations};
-use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen, Output};
+use codec::{Decode, DecodeWithMemTracking, Encode, EncodeLike, Input, MaxEncodedLen, Output};
 use frame_support::{pallet_prelude::Get, BoundedVec};
 use scale_info::TypeInfo;
 use sp_runtime::{
@@ -27,7 +27,9 @@ use sp_runtime::{
 };
 
 /// A number of lock periods, plus a vote, one way or the other.
-#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen)]
+#[derive(
+	DecodeWithMemTracking, Copy, Clone, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen,
+)]
 pub struct Vote {
 	pub aye: bool,
 	pub conviction: Conviction,
@@ -66,7 +68,18 @@ impl TypeInfo for Vote {
 }
 
 /// A vote for a referendum of a particular account.
-#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum AccountVote<Balance> {
 	/// A standard vote, one-way (approve or reject) with a given amount of conviction.
 	Standard { vote: Vote, balance: Balance },
diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs
index 22ba63b26161d45efea8189e60d24bf0cbb9e04b..3d2da6e9f717e9f5869eed8eb8f1872350164bc6 100644
--- a/substrate/frame/core-fellowship/src/lib.rs
+++ b/substrate/frame/core-fellowship/src/lib.rs
@@ -62,7 +62,7 @@
 extern crate alloc;
 
 use alloc::boxed::Box;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{fmt::Debug, marker::PhantomData};
 use scale_info::TypeInfo;
 use sp_arithmetic::traits::{Saturating, Zero};
@@ -91,7 +91,18 @@ pub use pallet::*;
 pub use weights::*;
 
 /// The desired outcome for which evidence is presented.
-#[derive(Encode, Decode, Eq, PartialEq, Copy, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	Copy,
+	Clone,
+	TypeInfo,
+	MaxEncodedLen,
+	RuntimeDebug,
+)]
 pub enum Wish {
 	/// Member wishes only to retain their current rank.
 	Retention,
@@ -109,6 +120,7 @@ pub type Evidence<T, I> = BoundedVec<u8, <T as Config<I>>::EvidenceSize>;
 #[derive(
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	CloneNoBound,
 	EqNoBound,
 	PartialEqNoBound,
@@ -241,17 +253,16 @@ pub mod pallet {
 
 	/// The overall status of the system.
 	#[pallet::storage]
-	pub(super) type Params<T: Config<I>, I: 'static = ()> =
-		StorageValue<_, ParamsOf<T, I>, ValueQuery>;
+	pub type Params<T: Config<I>, I: 'static = ()> = StorageValue<_, ParamsOf<T, I>, ValueQuery>;
 
 	/// The status of a claimant.
 	#[pallet::storage]
-	pub(super) type Member<T: Config<I>, I: 'static = ()> =
+	pub type Member<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, MemberStatusOf<T>, OptionQuery>;
 
 	/// Some evidence together with the desired outcome for which it was presented.
 	#[pallet::storage]
-	pub(super) type MemberEvidence<T: Config<I>, I: 'static = ()> =
+	pub type MemberEvidence<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, (Wish, Evidence<T, I>), OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
index 0dacfe9c55792f53e07fe708323ed6e0f8d5c784..fadc8d290d6f9e7ff05e718b57e0bdaa5e685e4a 100644
--- a/substrate/frame/delegated-staking/src/lib.rs
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -273,12 +273,12 @@ pub mod pallet {
 	/// Implementation note: We are not using a double map with `delegator` and `agent` account
 	/// as keys since we want to restrict delegators to delegate only to one account at a time.
 	#[pallet::storage]
-	pub(crate) type Delegators<T: Config> =
+	pub type Delegators<T: Config> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, Delegation<T>, OptionQuery>;
 
 	/// Map of `Agent` to their `Ledger`.
 	#[pallet::storage]
-	pub(crate) type Agents<T: Config> =
+	pub type Agents<T: Config> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, AgentLedger<T>, OptionQuery>;
 
 	// This pallet is not currently written with the intention of exposing any calls. But the
diff --git a/substrate/frame/democracy/src/conviction.rs b/substrate/frame/democracy/src/conviction.rs
index 54f4ff524f2a9be397a12eadb4bb0c9e88cbf501..64c062ce9434c868d8b8cd1c27eea3f3a504f1d5 100644
--- a/substrate/frame/democracy/src/conviction.rs
+++ b/substrate/frame/democracy/src/conviction.rs
@@ -18,7 +18,7 @@
 //! The conviction datatype.
 
 use crate::types::Delegations;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::result::Result;
 use scale_info::TypeInfo;
 use sp_runtime::{
@@ -31,6 +31,7 @@ use sp_runtime::{
 	Encode,
 	MaxEncodedLen,
 	Decode,
+	DecodeWithMemTracking,
 	Copy,
 	Clone,
 	Eq,
diff --git a/substrate/frame/democracy/src/types.rs b/substrate/frame/democracy/src/types.rs
index ee6e2e0aa253793751053a4393a2409b42bbee0f..88a7435a3b1628e9eade2e49fa3956b433356a82 100644
--- a/substrate/frame/democracy/src/types.rs
+++ b/substrate/frame/democracy/src/types.rs
@@ -18,7 +18,7 @@
 //! Miscellaneous additional datatypes.
 
 use crate::{AccountVote, Conviction, Vote, VoteThreshold};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero},
@@ -214,7 +214,17 @@ pub enum UnvoteScope {
 }
 
 /// Identifies an owner of a metadata.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum MetadataOwner {
 	/// External proposal.
 	External,
diff --git a/substrate/frame/democracy/src/vote.rs b/substrate/frame/democracy/src/vote.rs
index 779f7ecd570f060f29d53ebd05471eb37b531ff1..c1103d4da32e58e63a4f0a3d5c297f172ae413a1 100644
--- a/substrate/frame/democracy/src/vote.rs
+++ b/substrate/frame/democracy/src/vote.rs
@@ -18,7 +18,7 @@
 //! The vote datatype.
 
 use crate::{Conviction, Delegations, ReferendumIndex};
-use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen, Output};
+use codec::{Decode, DecodeWithMemTracking, Encode, EncodeLike, Input, MaxEncodedLen, Output};
 use frame_support::traits::Get;
 use scale_info::TypeInfo;
 use sp_runtime::{
@@ -27,7 +27,7 @@ use sp_runtime::{
 };
 
 /// A number of lock periods, plus a vote, one way or the other.
-#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)]
+#[derive(DecodeWithMemTracking, Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)]
 pub struct Vote {
 	pub aye: bool,
 	pub conviction: Conviction,
@@ -72,7 +72,18 @@ impl TypeInfo for Vote {
 }
 
 /// A vote for a referendum of a particular account.
-#[derive(Encode, MaxEncodedLen, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(
+	Encode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	Decode,
+	Copy,
+	Clone,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+)]
 pub enum AccountVote<Balance> {
 	/// A standard vote, one-way (approve or reject) with a given amount of conviction.
 	Standard { vote: Vote, balance: Balance },
diff --git a/substrate/frame/democracy/src/vote_threshold.rs b/substrate/frame/democracy/src/vote_threshold.rs
index 82d6ed178f13783f2e9495ce064378b3f8de5ecf..9e4e66897a06a287f56fcb5589ef448f4db9b35a 100644
--- a/substrate/frame/democracy/src/vote_threshold.rs
+++ b/substrate/frame/democracy/src/vote_threshold.rs
@@ -18,7 +18,7 @@
 //! Voting thresholds.
 
 use crate::Tally;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::{Add, Div, Mul, Rem};
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
@@ -27,7 +27,16 @@ use sp_runtime::traits::{IntegerSquareRoot, Zero};
 
 /// A means of determining if a vote is past pass threshold.
 #[derive(
-	Clone, Copy, PartialEq, Eq, Encode, MaxEncodedLen, Decode, sp_runtime::RuntimeDebug, TypeInfo,
+	Clone,
+	Copy,
+	PartialEq,
+	Eq,
+	Encode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	Decode,
+	sp_runtime::RuntimeDebug,
+	TypeInfo,
 )]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub enum VoteThreshold {
diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs
index 355f117bc4573d6fc5f96cd48b412edce9218aff..86a94c67c5844b30aa84ade16e328e3712ae68c7 100644
--- a/substrate/frame/election-provider-multi-block/src/lib.rs
+++ b/substrate/frame/election-provider-multi-block/src/lib.rs
@@ -66,7 +66,7 @@
 //!
 //! ## Pagination
 //!
-//! Most of the external APIs of this pallet are paginated. All pagination follow a patter where if
+//! Most of the external APIs of this pallet are paginated. All pagination follow a pattern where if
 //! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`.
 //! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as
 //! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is
@@ -324,7 +324,15 @@ impl<T: Config> From<verifier::FeasibilityError> for ElectionError<T> {
 
 /// Different operations that the [`Config::AdminOrigin`] can perform on the pallet.
 #[derive(
-	Encode, Decode, MaxEncodedLen, TypeInfo, DebugNoBound, CloneNoBound, PartialEqNoBound, EqNoBound,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	DebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
 )]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
diff --git a/substrate/frame/election-provider-multi-block/src/types.rs b/substrate/frame/election-provider-multi-block/src/types.rs
index 9657277a79e422aa61cf916addc9a90323baf3c0..f74c9aee05311b2b3ddc635b67ecccb46bc08766 100644
--- a/substrate/frame/election-provider-multi-block/src/types.rs
+++ b/substrate/frame/election-provider-multi-block/src/types.rs
@@ -22,7 +22,7 @@ use sp_core::Get;
 use sp_std::{collections::btree_set::BTreeSet, fmt::Debug, prelude::*};
 
 use crate::unsigned::miner::MinerConfig;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_election_provider_support::ElectionProvider;
 pub use frame_election_provider_support::{NposSolution, PageIndex};
 use scale_info::TypeInfo;
@@ -53,6 +53,7 @@ pub type AssignmentOf<T> =
 	TypeInfo,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	DebugNoBound,
 	CloneNoBound,
 	EqNoBound,
@@ -230,7 +231,18 @@ impl Default for ElectionCompute {
 }
 
 /// Current phase of the pallet.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, MaxEncodedLen, Debug, TypeInfo)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	Debug,
+	TypeInfo,
+)]
 pub enum Phase<Bn> {
 	/// Nothing is happening, and nothing will happen.
 	Halted,
diff --git a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs
index 98391daa546c49f39cf9f4691c0be0e33f8861b0..a5f7c4aa5c3daca47fb276afd2538abcc61148e3 100644
--- a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs
+++ b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs
@@ -78,7 +78,16 @@ use sp_std::{fmt::Debug, prelude::*};
 pub use crate::weights::measured::pallet_election_provider_multi_block_verifier::*;
 
 /// Errors that can happen in the feasibility check.
-#[derive(Debug, Eq, PartialEq, codec::Encode, codec::Decode, scale_info::TypeInfo, Clone)]
+#[derive(
+	Debug,
+	Eq,
+	PartialEq,
+	codec::Encode,
+	codec::Decode,
+	codec::DecodeWithMemTracking,
+	scale_info::TypeInfo,
+	Clone,
+)]
 pub enum FeasibilityError {
 	/// Wrong number of winners presented.
 	WrongWinnerCount,
diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs
index 3a5103d2bb8abe1a5253dd2f5e83ac850ed4b70a..c0e256c3e65253c34fa891b92586ec487b161fb1 100644
--- a/substrate/frame/election-provider-multi-phase/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/src/lib.rs
@@ -244,7 +244,7 @@
 extern crate alloc;
 
 use alloc::{boxed::Box, vec::Vec};
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use frame_election_provider_support::{
 	bounds::{CountBound, ElectionBounds, SizeBound},
 	BoundedSupports, BoundedSupportsOf, ElectionDataProvider, ElectionProvider,
@@ -336,7 +336,7 @@ pub trait BenchmarkingConfig {
 }
 
 /// Current phase of the pallet.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)]
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo)]
 pub enum Phase<Bn> {
 	/// Nothing, the election is not happening.
 	Off,
@@ -398,7 +398,7 @@ impl<Bn: PartialEq + Eq> Phase<Bn> {
 }
 
 /// The type of `Computation` that provided this election data.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)]
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo)]
 pub enum ElectionCompute {
 	/// Election was computed on-chain.
 	OnChain,
@@ -424,7 +424,18 @@ impl Default for ElectionCompute {
 ///
 /// Such a solution should never become effective in anyway before being checked by the
 /// `Pallet::feasibility_check`.
-#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, PartialOrd, Ord, TypeInfo)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	PartialOrd,
+	Ord,
+	TypeInfo,
+)]
 pub struct RawSolution<S> {
 	/// the solution itself.
 	pub solution: S,
@@ -490,7 +501,9 @@ pub struct RoundSnapshot<AccountId, DataProvider> {
 /// This is stored automatically on-chain, and it contains the **size of the entire snapshot**.
 /// This is also used in dispatchables as weight witness data and should **only contain the size of
 /// the presented solution**, not the entire snapshot.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, Default, TypeInfo)]
+#[derive(
+	PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, Default, TypeInfo,
+)]
 pub struct SolutionOrSnapshotSize {
 	/// The length of voters.
 	#[codec(compact)]
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
index b1029e89fe85f65650fb5406314241c220cd2b28..fa64dd6f7d6ebd32998f93e95e2aba8b685fa916 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
@@ -170,7 +170,7 @@ fn mass_slash_doesnt_enter_emergency_phase() {
 		}
 
 		// Ensure no more than disabling limit of validators (default 1/3) is disabled
-		let disabling_limit = pallet_staking::UpToLimitWithReEnablingDisablingStrategy::<
+		let disabling_limit = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy::<
 			SLASHING_DISABLING_FACTOR,
 		>::disable_limit(active_set_size_before_slash);
 		assert!(disabled.len() == disabling_limit);
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
index 8c8de865600c0964bb69ef6fa530b2527b60794d..120deff96a75eb0d297efc9bc017b2946f39d86c 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
@@ -142,11 +142,14 @@ impl pallet_session::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Runtime>;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy<
+		SLASHING_DISABLING_FACTOR,
+	>;
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 frame_election_provider_support::generate_solution_type!(
@@ -335,8 +338,6 @@ impl pallet_staking::Config for Runtime {
 	type MaxUnlockingChunks = MaxUnlockingChunks;
 	type EventListeners = (Pools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
-	type DisablingStrategy =
-		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<SLASHING_DISABLING_FACTOR>;
 	type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
 }
 
@@ -908,10 +909,7 @@ pub(crate) fn on_offence_now(
 // Add offence to validator, slash it.
 pub(crate) fn add_slash(who: &AccountId) {
 	on_offence_now(
-		&[OffenceDetails {
-			offender: (*who, Staking::eras_stakers(active_era(), who)),
-			reporters: vec![],
-		}],
+		&[OffenceDetails { offender: (*who, ()), reporters: vec![] }],
 		&[Perbill::from_percent(10)],
 	);
 }
diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs
index f57dcb9694a83c58eb25a930315df495f1e67209..c921be34b34304b9e65683b7d2f67bbcb6e7b15c 100644
--- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs
+++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs
@@ -74,7 +74,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 		);
 		quote! {
 			#compact_impl
-			#[derive(Default, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)]
+			#[derive(Default, PartialEq, Eq, Clone, Debug, PartialOrd, Ord, _fepsp::codec::DecodeWithMemTracking)]
 		}
 	} else {
 		// automatically derived.
@@ -88,6 +88,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 			PartialOrd,
 			_fepsp::codec::Encode,
 			_fepsp::codec::Decode,
+			_fepsp::codec::DecodeWithMemTracking,
 			_fepsp::scale_info::TypeInfo,
 		)])
 	};
diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs
index 68aee2c82e62b4e9c3c9ccb16a4a2dbcf4e85d75..27f8d96e5e791155ed5fee004afde17f325aebd3 100644
--- a/substrate/frame/election-provider-support/src/lib.rs
+++ b/substrate/frame/election-provider-support/src/lib.rs
@@ -209,7 +209,7 @@ use sp_runtime::{
 };
 
 pub use bounds::DataProviderBounds;
-pub use codec::{Decode, Encode, MaxEncodedLen};
+pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 /// Re-export the solution generation macro.
 pub use frame_election_provider_solution_type::generate_solution_type;
 pub use frame_support::{traits::Get, weights::Weight, BoundedVec, DefaultNoBound};
@@ -830,7 +830,9 @@ pub type VoterOf<D> =
 	Voter<<D as ElectionDataProvider>::AccountId, <D as ElectionDataProvider>::MaxVotesPerVoter>;
 
 /// A bounded vector of supports. Bounded equivalent to [`sp_npos_elections::Supports`].
-#[derive(Default, Debug, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+#[derive(
+	Default, Debug, Encode, Decode, DecodeWithMemTracking, scale_info::TypeInfo, MaxEncodedLen,
+)]
 #[codec(mel_bound(AccountId: MaxEncodedLen, Bound: Get<u32>))]
 #[scale_info(skip_type_params(Bound))]
 pub struct BoundedSupport<AccountId, Bound: Get<u32>> {
@@ -906,7 +908,7 @@ impl<AccountId: Clone, Bound: Get<u32>> BoundedSupport<AccountId, Bound> {
 /// corresponds to the bound of the maximum winners that the bounded supports may contain.
 ///
 /// With the bounds, we control the maximum size of a bounded supports instance.
-#[derive(Encode, Decode, TypeInfo, DefaultNoBound, MaxEncodedLen)]
+#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo, DefaultNoBound, MaxEncodedLen)]
 #[codec(mel_bound(AccountId: MaxEncodedLen, BOuter: Get<u32>, BInner: Get<u32>))]
 #[scale_info(skip_type_params(BOuter, BInner))]
 pub struct BoundedSupports<AccountId, BOuter: Get<u32>, BInner: Get<u32>>(
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs
index 4a40d44e407761d08b4e9c2741ced71153a80555..d73cd510f820de2cc3b96266b4f05f8358bb572a 100644
--- a/substrate/frame/elections-phragmen/src/lib.rs
+++ b/substrate/frame/elections-phragmen/src/lib.rs
@@ -101,7 +101,7 @@
 extern crate alloc;
 
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use core::cmp::Ordering;
 use frame_support::{
 	traits::{
@@ -139,7 +139,7 @@ type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
 >>::NegativeImbalance;
 
 /// An indication that the renouncing account currently has which of the below roles.
-#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, RuntimeDebug, TypeInfo)]
 pub enum Renouncing {
 	/// A member is renouncing.
 	Member,
diff --git a/substrate/frame/fast-unstake/src/types.rs b/substrate/frame/fast-unstake/src/types.rs
index 2a2319ef61296a5781b9ff752d45da1aa7c4cbf6..518840a16a30367974d7ce4a85712081bb57dc83 100644
--- a/substrate/frame/fast-unstake/src/types.rs
+++ b/substrate/frame/fast-unstake/src/types.rs
@@ -20,7 +20,7 @@
 use crate::Config;
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
-	traits::Currency, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
+	traits::Currency, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
 };
 use scale_info::TypeInfo;
 use sp_staking::{EraIndex, StakingInterface};
@@ -39,14 +39,21 @@ impl<T: Config> frame_support::traits::Get<u32> for MaxChecking<T> {
 }
 
 #[docify::export]
-pub(crate) type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 /// An unstake request.
 ///
 /// This is stored in [`crate::Head`] storage item and points to the current unstake request that is
 /// being processed.
 #[derive(
-	Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen,
+	Encode,
+	Decode,
+	EqNoBound,
+	PartialEqNoBound,
+	CloneNoBound,
+	TypeInfo,
+	RuntimeDebugNoBound,
+	MaxEncodedLen,
 )]
 #[scale_info(skip_type_params(T))]
 pub struct UnstakeRequest<T: Config> {
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index a14bdc9d73b1f95d08df167f0f1394fadf48525a..482e767d32fc05faee88a866603ab5d48fa421b1 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -104,12 +104,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <TestSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = TestSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/identity/src/legacy.rs b/substrate/frame/identity/src/legacy.rs
index de5b9f79b559b0de80894f8574854644c7dc239c..a5ef069f9268c4bf31a58caf1422addc4bc63b3a 100644
--- a/substrate/frame/identity/src/legacy.rs
+++ b/substrate/frame/identity/src/legacy.rs
@@ -17,7 +17,7 @@
 
 #[cfg(feature = "runtime-benchmarks")]
 use alloc::vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 #[cfg(feature = "runtime-benchmarks")]
 use enumflags2::BitFlag;
 use enumflags2::{bitflags, BitFlags};
@@ -69,6 +69,7 @@ impl TypeInfo for IdentityField {
 	CloneNoBound,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	EqNoBound,
 	MaxEncodedLen,
 	PartialEqNoBound,
diff --git a/substrate/frame/identity/src/types.rs b/substrate/frame/identity/src/types.rs
index ece3c34f82efc78a86534a901374fab851d20105..85f2f8f8597bc4faac25497969a0bf2018d1cd1b 100644
--- a/substrate/frame/identity/src/types.rs
+++ b/substrate/frame/identity/src/types.rs
@@ -17,7 +17,7 @@
 
 use super::*;
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{fmt::Debug, iter::once, ops::Add};
 use frame_support::{
 	traits::{ConstU32, Get},
@@ -39,7 +39,7 @@ pub type RegistrarIndex = u32;
 /// than 32-bytes then it will be truncated when encoding.
 ///
 /// Can also be `None`.
-#[derive(Clone, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)]
+#[derive(Clone, DecodeWithMemTracking, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)]
 pub enum Data {
 	/// No data here.
 	None,
@@ -190,7 +190,18 @@ impl Default for Data {
 ///
 /// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear
 /// which fields their attestation is relevant for by off-chain means.
-#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+)]
 pub enum Judgement<Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq>
 {
 	/// The default value; no opinion is held.
diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs
index 74d3bc6484dd47217be4bdc31d2a68f97e687a8e..28d97489d98148193280b8704c78c5ed15355441 100644
--- a/substrate/frame/im-online/src/lib.rs
+++ b/substrate/frame/im-online/src/lib.rs
@@ -85,7 +85,7 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_support::{
 	pallet_prelude::*,
 	traits::{
@@ -218,7 +218,7 @@ impl<BlockNumber: core::fmt::Debug> core::fmt::Debug for OffchainErr<BlockNumber
 pub type AuthIndex = u32;
 
 /// Heartbeat which is sent/received.
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)]
+#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)]
 pub struct Heartbeat<BlockNumber>
 where
 	BlockNumber: PartialEq + Eq + Decode + Encode,
diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs
index a5d9a6e20e616945d00016944eea16d15b2d050d..4ccbde193147815e270c9023306e9b9d34232b76 100644
--- a/substrate/frame/im-online/src/mock.rs
+++ b/substrate/frame/im-online/src/mock.rs
@@ -127,6 +127,7 @@ impl pallet_session::Config for Runtime {
 	type Keys = UintAuthorityId;
 	type RuntimeEvent = RuntimeEvent;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs
index 9cd3e42d70dd2af61ff2d7897be32f696c14a1c5..0e2a2f6b01b4a602e32f3fa62a61a3bec0701485 100644
--- a/substrate/frame/message-queue/src/lib.rs
+++ b/substrate/frame/message-queue/src/lib.rs
@@ -206,7 +206,7 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::{vec, vec::Vec};
-use codec::{Codec, Decode, Encode, MaxEncodedLen};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{fmt::Debug, ops::Deref};
 use frame_support::{
 	defensive,
diff --git a/substrate/frame/message-queue/src/mock_helpers.rs b/substrate/frame/message-queue/src/mock_helpers.rs
index 873add776e2093977962a084e35b921a92ba7338..9730d0c6fd22e9525affd6401feada9b03d7c265 100644
--- a/substrate/frame/message-queue/src/mock_helpers.rs
+++ b/substrate/frame/message-queue/src/mock_helpers.rs
@@ -37,7 +37,18 @@ impl IntoWeight for u64 {
 }
 
 /// Mocked message origin for testing.
-#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, Debug)]
+#[derive(
+	Copy,
+	Clone,
+	Eq,
+	PartialEq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	Debug,
+)]
 pub enum MessageOrigin {
 	Here,
 	There,
diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs
index fef61468e6e4ebe6c70626d13e23800f3e65f84e..5fcbc2ee62980c947a33d0fed1b399b117da6497 100644
--- a/substrate/frame/migrations/src/lib.rs
+++ b/substrate/frame/migrations/src/lib.rs
@@ -157,7 +157,7 @@ pub use pallet::*;
 pub use weights::WeightInfo;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::ControlFlow;
 use frame_support::{
 	defensive, defensive_assert,
@@ -174,7 +174,17 @@ use frame_system::{
 use sp_runtime::Saturating;
 
 /// Points to the next migration to execute.
-#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+#[derive(
+	Debug,
+	Clone,
+	Eq,
+	PartialEq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	scale_info::TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum MigrationCursor<Cursor, BlockNumber> {
 	/// Points to the currently active migration and its inner cursor.
 	Active(ActiveCursor<Cursor, BlockNumber>),
@@ -202,7 +212,17 @@ impl<Cursor, BlockNumber> From<ActiveCursor<Cursor, BlockNumber>>
 }
 
 /// Points to the currently active migration and its inner cursor.
-#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+#[derive(
+	Debug,
+	Clone,
+	Eq,
+	PartialEq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	scale_info::TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct ActiveCursor<Cursor, BlockNumber> {
 	/// The index of the migration in the MBM tuple.
 	pub index: u32,
@@ -224,7 +244,9 @@ impl<Cursor, BlockNumber> ActiveCursor<Cursor, BlockNumber> {
 }
 
 /// How to clear the records of historic migrations.
-#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo)]
+#[derive(
+	Debug, Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, scale_info::TypeInfo,
+)]
 pub enum HistoricCleanupSelector<Id> {
 	/// Clear exactly these entries.
 	///
diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs
index 869b4adc2adcea00c274711c4f5dee1228eaca70..6fdc52d6887d03895199fe1351066901e2d45e7e 100644
--- a/substrate/frame/multisig/src/lib.rs
+++ b/substrate/frame/multisig/src/lib.rs
@@ -74,7 +74,7 @@ macro_rules! log {
 	};
 }
 
-type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 
 pub type BlockNumberFor<T> =
@@ -84,13 +84,23 @@ pub type BlockNumberFor<T> =
 /// block's height. This allows a transaction in which a multisig operation of a particular
 /// composite was created to be uniquely identified.
 #[derive(
-	Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen,
+	Copy,
+	Clone,
+	Eq,
+	PartialEq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Default,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
 )]
 pub struct Timepoint<BlockNumber> {
 	/// The height of the chain at the point in time.
-	height: BlockNumber,
+	pub height: BlockNumber,
 	/// The index of the extrinsic at the point in time.
-	index: u32,
+	pub index: u32,
 }
 
 /// An open multisig operation.
@@ -101,13 +111,13 @@ where
 	MaxApprovals: Get<u32>,
 {
 	/// The extrinsic when the multisig operation was opened.
-	when: Timepoint<BlockNumber>,
+	pub when: Timepoint<BlockNumber>,
 	/// The amount held in reserve of the `depositor`, to be returned once the operation ends.
-	deposit: Balance,
+	pub deposit: Balance,
 	/// The account who opened it (i.e. the first to approve it).
-	depositor: AccountId,
+	pub depositor: AccountId,
 	/// The approvals achieved so far, including the depositor. Always sorted.
-	approvals: BoundedVec<AccountId, MaxApprovals>,
+	pub approvals: BoundedVec<AccountId, MaxApprovals>,
 }
 
 type CallHash = [u8; 32];
@@ -157,7 +167,28 @@ pub mod pallet {
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: weights::WeightInfo;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliable block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs
index 3ab85993473a2030ed0430ff569b5b5c5904e826..b263cc0b9f16b7196e2ecb2ab0cc22cb68be18e2 100644
--- a/substrate/frame/nfts/src/types.rs
+++ b/substrate/frame/nfts/src/types.rs
@@ -20,7 +20,7 @@
 use super::*;
 use crate::macros::*;
 use alloc::{vec, vec::Vec};
-use codec::EncodeLike;
+use codec::{DecodeWithMemTracking, EncodeLike};
 use enumflags2::{bitflags, BitFlags};
 use frame_support::{
 	pallet_prelude::{BoundedVec, MaxEncodedLen},
@@ -108,7 +108,18 @@ pub struct CollectionDetails<AccountId, DepositBalance> {
 }
 
 /// Witness data for the destroy transactions.
-#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct DestroyWitness {
 	/// The total number of items in this collection that have outstanding item metadata.
 	#[codec(compact)]
@@ -132,7 +143,9 @@ impl<AccountId, DepositBalance> CollectionDetails<AccountId, DepositBalance> {
 }
 
 /// Witness data for items mint transactions.
-#[derive(Clone, Encode, Decode, Default, Eq, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(
+	Clone, Encode, Decode, DecodeWithMemTracking, Default, Eq, PartialEq, RuntimeDebug, TypeInfo,
+)]
 pub struct MintWitness<ItemId, Balance> {
 	/// Provide the id of the item in a required collection.
 	pub owned_item: Option<ItemId>,
@@ -191,7 +204,17 @@ pub struct ItemMetadata<Deposit, StringLimit: Get<u32>> {
 }
 
 /// Information about the tip.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct ItemTip<CollectionId, ItemId, AccountId, Amount> {
 	/// The collection of the item.
 	pub collection: CollectionId,
@@ -235,7 +258,17 @@ pub struct ItemMetadataDeposit<DepositBalance, AccountId> {
 }
 
 /// Specifies whether the tokens will be sent or received.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum PriceDirection {
 	/// Tokens will be sent.
 	Send,
@@ -244,7 +277,17 @@ pub enum PriceDirection {
 }
 
 /// Holds the details about the price.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct PriceWithDirection<Amount> {
 	/// An amount.
 	pub amount: Amount,
@@ -289,11 +332,25 @@ impl CollectionSettings {
 }
 
 impl_codec_bitflags!(CollectionSettings, u64, CollectionSetting);
+// We can implement `DecodeWithMemTracking` for `CollectionSettings`
+// since `u64` also implements `DecodeWithMemTracking`.
+impl DecodeWithMemTracking for CollectionSettings {}
 
 /// Mint type. Can the NFT be create by anyone, or only the creator of the collection,
 /// or only by wallets that already hold an NFT from a certain collection?
 /// The ownership of a privately minted NFT is still publicly visible.
-#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum MintType<CollectionId> {
 	/// Only an `Issuer` could mint items.
 	Issuer,
@@ -304,7 +361,18 @@ pub enum MintType<CollectionId> {
 }
 
 /// Holds the information about minting.
-#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct MintSettings<Price, BlockNumber, CollectionId> {
 	/// Whether anyone can mint or if minters are restricted to some subset.
 	pub mint_type: MintType<CollectionId>,
@@ -332,7 +400,15 @@ impl<Price, BlockNumber, CollectionId> Default for MintSettings<Price, BlockNumb
 
 /// Attribute namespaces for non-fungible tokens.
 #[derive(
-	Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	scale_info::TypeInfo,
+	MaxEncodedLen,
 )]
 pub enum AttributeNamespace<AccountId> {
 	/// An attribute was set by the pallet.
@@ -346,14 +422,24 @@ pub enum AttributeNamespace<AccountId> {
 }
 
 /// A witness data to cancel attributes approval operation.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(Clone, Encode, Decode, DecodeWithMemTracking, Eq, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct CancelAttributesApprovalWitness {
 	/// An amount of attributes previously created by account.
 	pub account_attributes: u32,
 }
 
 /// A list of possible pallet-level attributes.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum PalletAttributes<CollectionId> {
 	/// Marks an item as being used in order to claim another item.
 	UsedToClaim(CollectionId),
@@ -363,7 +449,16 @@ pub enum PalletAttributes<CollectionId> {
 
 /// Collection's configuration.
 #[derive(
-	Clone, Copy, Decode, Default, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo,
+	Clone,
+	Copy,
+	Decode,
+	DecodeWithMemTracking,
+	Default,
+	Encode,
+	MaxEncodedLen,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
 )]
 pub struct CollectionConfig<Price, BlockNumber, CollectionId> {
 	/// Collection's settings.
@@ -422,10 +517,22 @@ impl ItemSettings {
 }
 
 impl_codec_bitflags!(ItemSettings, u64, ItemSetting);
+// We can implement `DecodeWithMemTracking` for `ItemSettings`
+// since `u64` also implements `DecodeWithMemTracking`.
+impl DecodeWithMemTracking for ItemSettings {}
 
 /// Item's configuration.
 #[derive(
-	Encode, Decode, Default, PartialEq, RuntimeDebug, Clone, Copy, MaxEncodedLen, TypeInfo,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Default,
+	PartialEq,
+	RuntimeDebug,
+	Clone,
+	Copy,
+	MaxEncodedLen,
+	TypeInfo,
 )]
 pub struct ItemConfig {
 	/// Item's settings.
@@ -516,7 +623,7 @@ impl CollectionRoles {
 }
 impl_codec_bitflags!(CollectionRoles, u8, CollectionRole);
 
-#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo)]
 pub struct PreSignedMint<CollectionId, ItemId, AccountId, Deadline, Balance> {
 	/// A collection of the item to be minted.
 	pub collection: CollectionId,
@@ -534,7 +641,7 @@ pub struct PreSignedMint<CollectionId, ItemId, AccountId, Deadline, Balance> {
 	pub mint_price: Option<Balance>,
 }
 
-#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo)]
 pub struct PreSignedAttributes<CollectionId, ItemId, AccountId, Deadline> {
 	/// Collection's ID.
 	pub collection: CollectionId,
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index b45861289a561f2ec33b6f41e225384f83feda89..39a7cf05b3abadff82aeb8ab8b34d9918eccbb23 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -355,7 +355,7 @@ extern crate alloc;
 
 use adapter::{Member, Pool, StakeStrategy};
 use alloc::{collections::btree_map::BTreeMap, vec::Vec};
-use codec::Codec;
+use codec::{Codec, DecodeWithMemTracking};
 use core::{fmt::Debug, ops::Div};
 use frame_support::{
 	defensive, defensive_assert, ensure,
@@ -422,7 +422,16 @@ pub type BlockNumberFor<T> =
 pub const POINTS_TO_BALANCE_INIT_RATIO: u32 = 1;
 
 /// Possible operations on the configuration values of this pallet.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound, PartialEq, Clone)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	RuntimeDebugNoBound,
+	PartialEq,
+	Clone,
+)]
 pub enum ConfigOp<T: Codec + Debug> {
 	/// Don't change.
 	Noop,
@@ -441,7 +450,7 @@ pub enum BondType {
 }
 
 /// How to increase the bond of a member.
-#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)]
+#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)]
 pub enum BondExtra<Balance> {
 	/// Take from the free balance.
 	FreeBalance(Balance),
@@ -457,7 +466,18 @@ enum AccountType {
 }
 
 /// The permission a pool member can set for other accounts to claim rewards on their behalf.
-#[derive(Encode, Decode, MaxEncodedLen, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	Clone,
+	Copy,
+	Debug,
+	PartialEq,
+	Eq,
+	TypeInfo,
+)]
 pub enum ClaimPermission {
 	/// Only the pool member themselves can claim their rewards.
 	Permissioned,
@@ -497,7 +517,8 @@ impl ClaimPermission {
 	TypeInfo,
 	RuntimeDebugNoBound,
 	CloneNoBound,
-	frame_support::PartialEqNoBound,
+	PartialEqNoBound,
+	EqNoBound,
 )]
 #[cfg_attr(feature = "std", derive(DefaultNoBound))]
 #[scale_info(skip_type_params(T))]
@@ -670,7 +691,17 @@ impl<T: Config> PoolMember<T> {
 }
 
 /// A pool's possible states.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, RuntimeDebugNoBound, Clone, Copy)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	PartialEq,
+	RuntimeDebugNoBound,
+	Clone,
+	Copy,
+)]
 pub enum PoolState {
 	/// The pool is open to be joined, and is working normally.
 	Open,
@@ -703,7 +734,18 @@ pub struct PoolRoles<AccountId> {
 }
 
 // A pool's possible commission claiming permissions.
-#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum CommissionClaimPermission<AccountId> {
 	Permissionless,
 	Account(AccountId),
@@ -917,7 +959,9 @@ impl<T: Config> Commission<T> {
 /// blocks that must elapse before commission updates are allowed again.
 ///
 /// Commission change rates are not applied to decreases in commission.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Debug, PartialEq, Copy, Clone)]
+#[derive(
+	Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, TypeInfo, Debug, PartialEq, Copy, Clone,
+)]
 pub struct CommissionChangeRate<BlockNumber> {
 	/// The maximum amount the commission can be updated by per `min_delay` period.
 	pub max_increase: Perbill,
@@ -1295,8 +1339,17 @@ impl<T: Config> BondedPool<T> {
 /// A reward pool is not so much a pool anymore, since it does not contain any shares or points.
 /// Rather, simply to fit nicely next to bonded pool and unbonding pools in terms of terminology. In
 /// reality, a reward pool is just a container for a few pool-dependent data related to the rewards.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq, DefaultNoBound))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+	RuntimeDebugNoBound,
+)]
+#[cfg_attr(feature = "std", derive(DefaultNoBound))]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct RewardPool<T: Config> {
@@ -1304,19 +1357,19 @@ pub struct RewardPool<T: Config> {
 	///
 	/// This is updated ONLY when the points in the bonded pool change, which means `join`,
 	/// `bond_extra` and `unbond`, all of which is done through `update_recorded`.
-	last_recorded_reward_counter: T::RewardCounter,
+	pub last_recorded_reward_counter: T::RewardCounter,
 	/// The last recorded total payouts of the reward pool.
 	///
 	/// Payouts is essentially income of the pool.
 	///
 	/// Update criteria is same as that of `last_recorded_reward_counter`.
-	last_recorded_total_payouts: BalanceOf<T>,
+	pub last_recorded_total_payouts: BalanceOf<T>,
 	/// Total amount that this pool has paid out so far to the members.
-	total_rewards_claimed: BalanceOf<T>,
+	pub total_rewards_claimed: BalanceOf<T>,
 	/// The amount of commission pending to be claimed.
-	total_commission_pending: BalanceOf<T>,
+	pub total_commission_pending: BalanceOf<T>,
 	/// The amount of commission that has been claimed.
-	total_commission_claimed: BalanceOf<T>,
+	pub total_commission_claimed: BalanceOf<T>,
 }
 
 impl<T: Config> RewardPool<T> {
@@ -1455,15 +1508,24 @@ impl<T: Config> RewardPool<T> {
 }
 
 /// An unbonding pool. This is always mapped with an era.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, DefaultNoBound, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	DefaultNoBound,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct UnbondPool<T: Config> {
 	/// The points in this pool.
-	points: BalanceOf<T>,
+	pub points: BalanceOf<T>,
 	/// The funds in the pool.
-	balance: BalanceOf<T>,
+	pub balance: BalanceOf<T>,
 }
 
 impl<T: Config> UnbondPool<T> {
@@ -1498,17 +1560,26 @@ impl<T: Config> UnbondPool<T> {
 	}
 }
 
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, DefaultNoBound, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	DefaultNoBound,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct SubPools<T: Config> {
 	/// A general, era agnostic pool of funds that have fully unbonded. The pools
 	/// of `Self::with_era` will lazily be merged into into this pool if they are
 	/// older then `current_era - TotalUnbondingPools`.
-	no_era: UnbondPool<T>,
+	pub no_era: UnbondPool<T>,
 	/// Map of era in which a pool becomes unbonded in => unbond pools.
-	with_era: BoundedBTreeMap<EraIndex, UnbondPool<T>, TotalUnbondingPools<T>>,
+	pub with_era: BoundedBTreeMap<EraIndex, UnbondPool<T>, TotalUnbondingPools<T>>,
 }
 
 impl<T: Config> SubPools<T> {
@@ -1980,7 +2051,9 @@ pub mod pallet {
 		NotSupported,
 	}
 
-	#[derive(Encode, Decode, PartialEq, TypeInfo, PalletError, RuntimeDebug)]
+	#[derive(
+		Encode, Decode, DecodeWithMemTracking, PartialEq, TypeInfo, PalletError, RuntimeDebug,
+	)]
 	pub enum DefensiveError {
 		/// There isn't enough space in the unbond pool.
 		NotEnoughSpaceInUnbondPool,
diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs
index 3d3cd470bc24cc3fd63eaf09a52b99ee5eb7b33e..fa4349d1d94c848ff46435daa409d3c659e19388 100644
--- a/substrate/frame/offences/benchmarking/src/inner.rs
+++ b/substrate/frame/offences/benchmarking/src/inner.rs
@@ -170,6 +170,13 @@ fn make_offenders<T: Config>(
 	Ok(id_tuples)
 }
 
+#[cfg(test)]
+fn run_staking_next_block<T: Config>() {
+	use frame_support::traits::Hooks;
+	System::<T>::set_block_number(System::<T>::block_number().saturating_add(1u32.into()));
+	Staking::<T>::on_initialize(System::<T>::block_number());
+}
+
 #[cfg(test)]
 fn assert_all_slashes_applied<T>(offender_count: usize)
 where
@@ -182,10 +189,10 @@ where
 	// make sure that all slashes have been applied
 	// deposit to reporter + reporter account endowed.
 	assert_eq!(System::<T>::read_events_for_pallet::<pallet_balances::Event<T>>().len(), 2);
-	// (n nominators + one validator) * slashed + Slash Reported
+	// (n nominators + one validator) * slashed + Slash Reported + Slash Computed
 	assert_eq!(
 		System::<T>::read_events_for_pallet::<pallet_staking::Event<T>>().len(),
-		1 * (offender_count + 1) as usize + 1
+		1 * (offender_count + 1) as usize + 2
 	);
 	// offence
 	assert_eq!(System::<T>::read_events_for_pallet::<pallet_offences::Event>().len(), 1);
@@ -232,6 +239,8 @@ mod benchmarks {
 
 		#[cfg(test)]
 		{
+			// slashes applied at the next block.
+			run_staking_next_block::<T>();
 			assert_all_slashes_applied::<T>(n as usize);
 		}
 
@@ -266,6 +275,8 @@ mod benchmarks {
 		}
 		#[cfg(test)]
 		{
+			// slashes applied at the next block.
+			run_staking_next_block::<T>();
 			assert_all_slashes_applied::<T>(n as usize);
 		}
 
diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs
index 46a4e18c5e8fc23f5312833d59de27a88bd82b7f..63e440d9e004238e19af547cb7045c77355fe692 100644
--- a/substrate/frame/offences/benchmarking/src/mock.rs
+++ b/substrate/frame/offences/benchmarking/src/mock.rs
@@ -33,7 +33,6 @@ use sp_runtime::{
 };
 
 type AccountId = u64;
-type Balance = u64;
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Test {
@@ -54,8 +53,8 @@ impl pallet_timestamp::Config for Test {
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -95,6 +94,7 @@ impl pallet_session::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs
index 849ffddf4fb3c0c4e88fbf534a33312d0e2a83b9..744e2d18d67bca99e75136c17bf5a7f5b8586187 100644
--- a/substrate/frame/preimage/src/lib.rs
+++ b/substrate/frame/preimage/src/lib.rs
@@ -88,12 +88,12 @@ pub enum RequestStatus<AccountId, Ticket> {
 	Requested { maybe_ticket: Option<(AccountId, Ticket)>, count: u32, maybe_len: Option<u32> },
 }
 
-type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type TicketOf<T> = <T as Config>::Consideration;
+pub type TicketOf<T> = <T as Config>::Consideration;
 
 /// Maximum size of preimage we can store is 4mb.
-const MAX_SIZE: u32 = 4 * 1024 * 1024;
+pub const MAX_SIZE: u32 = 4 * 1024 * 1024;
 /// Hard-limit on the number of hashes that can be passed to `ensure_updated`.
 ///
 /// Exists only for benchmarking purposes.
@@ -132,7 +132,7 @@ pub mod pallet {
 	pub struct Pallet<T>(_);
 
 	#[pallet::event]
-	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	#[pallet::generate_deposit(pub fn deposit_event)]
 	pub enum Event<T: Config> {
 		/// A preimage has been noted.
 		Noted { hash: T::Hash },
@@ -172,16 +172,16 @@ pub mod pallet {
 	/// The request status of a given hash.
 	#[deprecated = "RequestStatusFor"]
 	#[pallet::storage]
-	pub(super) type StatusFor<T: Config> =
+	pub type StatusFor<T: Config> =
 		StorageMap<_, Identity, T::Hash, OldRequestStatus<T::AccountId, BalanceOf<T>>>;
 
 	/// The request status of a given hash.
 	#[pallet::storage]
-	pub(super) type RequestStatusFor<T: Config> =
+	pub type RequestStatusFor<T: Config> =
 		StorageMap<_, Identity, T::Hash, RequestStatus<T::AccountId, TicketOf<T>>>;
 
 	#[pallet::storage]
-	pub(super) type PreimageFor<T: Config> =
+	pub type PreimageFor<T: Config> =
 		StorageMap<_, Identity, (T::Hash, u32), BoundedVec<u8, ConstU32<MAX_SIZE>>>;
 
 	#[pallet::call(weight = T::WeightInfo)]
diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs
index 1fe9772617221c069ccd71543ae1de8e4af0b4b2..594d1721cd41bcb08c7ed3238dad7c19bc225553 100644
--- a/substrate/frame/proxy/src/lib.rs
+++ b/substrate/frame/proxy/src/lib.rs
@@ -167,7 +167,28 @@ pub mod pallet {
 		#[pallet::constant]
 		type AnnouncementDepositFactor: Get<BalanceOf<Self>>;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliable block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs
index 14389b03ac7e2593bcc37188d21a8a44974cac2f..b52dc5ce0e3989d00873d838e5f84cacc6cb2f30 100644
--- a/substrate/frame/proxy/src/tests.rs
+++ b/substrate/frame/proxy/src/tests.rs
@@ -64,6 +64,7 @@ impl pallet_utility::Config for Test {
 	PartialOrd,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	RuntimeDebug,
 	MaxEncodedLen,
 	scale_info::TypeInfo,
diff --git a/substrate/frame/ranked-collective/src/lib.rs b/substrate/frame/ranked-collective/src/lib.rs
index e34cf3d8df7114099ee02da5731012a81e38f1d7..4b1b3d9010db188348d30c4f8b2c07071544f867 100644
--- a/substrate/frame/ranked-collective/src/lib.rs
+++ b/substrate/frame/ranked-collective/src/lib.rs
@@ -42,7 +42,7 @@
 
 extern crate alloc;
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::marker::PhantomData;
 use scale_info::TypeInfo;
 use sp_arithmetic::traits::Saturating;
@@ -90,6 +90,7 @@ pub type Votes = u32;
 	TypeInfo,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	MaxEncodedLen,
 )]
 #[scale_info(skip_type_params(T, I, M))]
@@ -189,7 +190,18 @@ impl MemberRecord {
 }
 
 /// Record needed for every vote.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum VoteRecord {
 	/// Vote was an aye with given vote weight.
 	Aye(Votes),
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index 42fb641983f6308a9809ca8ec856ee2d019ab569..8159bbefa76b1765d7986781887ac373ea694924 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -240,7 +240,28 @@ pub mod pallet {
 			+ GetDispatchInfo
 			+ From<frame_system::Call<Self>>;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliable block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 
 		/// The currency mechanism.
diff --git a/substrate/frame/referenda/src/benchmarking.rs b/substrate/frame/referenda/src/benchmarking.rs
index 895f95dbec556e77781f9d46f2cf552ed8cb0956..59499d9c8bf086f21b010e60c7ba23f1e1a56d51 100644
--- a/substrate/frame/referenda/src/benchmarking.rs
+++ b/substrate/frame/referenda/src/benchmarking.rs
@@ -19,7 +19,7 @@
 
 use super::*;
 use crate::Pallet as Referenda;
-use alloc::{vec, vec::Vec};
+use alloc::{borrow::Cow, vec, vec::Vec};
 use assert_matches::assert_matches;
 use frame_benchmarking::v1::{
 	account, benchmarks_instance_pallet, whitelist_account, BenchmarkError,
@@ -110,7 +110,7 @@ fn fill_queue<T: Config<I>, I: 'static>(
 	others
 }
 
-fn info<T: Config<I>, I: 'static>(index: ReferendumIndex) -> &'static TrackInfoOf<T, I> {
+fn info<T: Config<I>, I: 'static>(index: ReferendumIndex) -> Cow<'static, TrackInfoOf<T, I>> {
 	let status = Referenda::<T, I>::ensure_ongoing(index).unwrap();
 	T::Tracks::info(status.track).expect("Id value returned from T::Tracks")
 }
diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs
index e6a895f9c5933961a831525035f7201d9e20cbe3..b58baa341cf596ef531b6dc4969904236dc791bb 100644
--- a/substrate/frame/referenda/src/lib.rs
+++ b/substrate/frame/referenda/src/lib.rs
@@ -100,7 +100,7 @@ pub use self::{
 		BalanceOf, BlockNumberFor, BoundedCallOf, CallOf, Curve, DecidingStatus, DecidingStatusOf,
 		Deposit, InsertSorted, NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex,
 		ReferendumInfo, ReferendumInfoOf, ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf,
-		TallyOf, TrackIdOf, TrackInfo, TrackInfoOf, TracksInfo, VotesOf,
+		TallyOf, Track, TrackIdOf, TrackInfo, TrackInfoOf, TracksInfo, VotesOf,
 	},
 	weights::WeightInfo,
 };
@@ -117,27 +117,6 @@ pub mod benchmarking;
 
 pub use frame_support::traits::Get;
 
-#[macro_export]
-macro_rules! impl_tracksinfo_get {
-	($tracksinfo:ty, $balance:ty, $blocknumber:ty) => {
-		impl
-			$crate::Get<
-				$crate::Vec<(
-					<$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id,
-					$crate::TrackInfo<$balance, $blocknumber>,
-				)>,
-			> for $tracksinfo
-		{
-			fn get() -> $crate::Vec<(
-				<$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id,
-				$crate::TrackInfo<$balance, $blocknumber>,
-			)> {
-				<$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::tracks().to_vec()
-			}
-		}
-	};
-}
-
 const ASSEMBLY_ID: LockIdentifier = *b"assembly";
 
 #[frame_support::pallet]
@@ -228,17 +207,11 @@ pub mod pallet {
 
 		// The other stuff.
 		/// Information concerning the different referendum tracks.
-		#[pallet::constant]
-		type Tracks: Get<
-				Vec<(
-					<Self::Tracks as TracksInfo<BalanceOf<Self, I>, BlockNumberFor<Self, I>>>::Id,
-					TrackInfo<BalanceOf<Self, I>, BlockNumberFor<Self, I>>,
-				)>,
-			> + TracksInfo<
-				BalanceOf<Self, I>,
-				BlockNumberFor<Self, I>,
-				RuntimeOrigin = <Self::RuntimeOrigin as OriginTrait>::PalletsOrigin,
-			>;
+		type Tracks: TracksInfo<
+			BalanceOf<Self, I>,
+			BlockNumberFor<Self, I>,
+			RuntimeOrigin = <Self::RuntimeOrigin as OriginTrait>::PalletsOrigin,
+		>;
 
 		/// The preimage provider.
 		type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage;
@@ -249,6 +222,14 @@ pub mod pallet {
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
+	#[pallet::extra_constants]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
+		#[pallet::constant_name(Tracks)]
+		fn tracks() -> Vec<Track<TrackIdOf<T, I>, BalanceOf<T, I>, BlockNumberFor<T, I>>> {
+			T::Tracks::tracks().map(|t| t.into_owned()).collect()
+		}
+	}
+
 	/// The next free referendum index, aka the number of referenda started so far.
 	#[pallet::storage]
 	pub type ReferendumCount<T, I = ()> = StorageValue<_, ReferendumIndex, ValueQuery>;
@@ -532,7 +513,7 @@ pub mod pallet {
 			let who = ensure_signed(origin)?;
 			let mut status = Self::ensure_ongoing(index)?;
 			ensure!(status.decision_deposit.is_none(), Error::<T, I>::HasDeposit);
-			let track = Self::track(status.track).ok_or(Error::<T, I>::NoTrack)?;
+			let track = T::Tracks::info(status.track).ok_or(Error::<T, I>::NoTrack)?;
 			status.decision_deposit =
 				Some(Self::take_deposit(who.clone(), track.decision_deposit)?);
 			let now = T::BlockNumberProvider::current_block_number();
@@ -668,7 +649,7 @@ pub mod pallet {
 				if let Some((index, mut status)) = Self::next_for_deciding(&mut track_queue) {
 					let now = T::BlockNumberProvider::current_block_number();
 					let (maybe_alarm, branch) =
-						Self::begin_deciding(&mut status, index, now, track_info);
+						Self::begin_deciding(&mut status, index, now, &track_info);
 					if let Some(set_alarm) = maybe_alarm {
 						Self::ensure_alarm_at(&mut status, index, set_alarm);
 					}
@@ -756,7 +737,7 @@ impl<T: Config<I>, I: 'static> Polling<T::Tally> for Pallet<T, I> {
 	type Class = TrackIdOf<T, I>;
 
 	fn classes() -> Vec<Self::Class> {
-		T::Tracks::tracks().iter().map(|x| x.0).collect()
+		T::Tracks::track_ids().collect()
 	}
 
 	fn access_poll<R>(
@@ -849,10 +830,9 @@ impl<T: Config<I>, I: 'static> Polling<T::Tally> for Pallet<T, I> {
 	#[cfg(feature = "runtime-benchmarks")]
 	fn max_ongoing() -> (Self::Class, u32) {
 		let r = T::Tracks::tracks()
-			.iter()
-			.max_by_key(|(_, info)| info.max_deciding)
+			.max_by_key(|t| t.info.max_deciding)
 			.expect("Always one class");
-		(r.0, r.1.max_deciding)
+		(r.id, r.info.max_deciding)
 	}
 }
 
@@ -874,7 +854,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		let info = ReferendumInfoFor::<T, I>::get(ref_index).ok_or(Error::<T, I>::BadReferendum)?;
 		match info {
 			ReferendumInfo::Ongoing(status) => {
-				let track = Self::track(status.track).ok_or(Error::<T, I>::NoTrack)?;
+				let track = T::Tracks::info(status.track).ok_or(Error::<T, I>::NoTrack)?;
 				let elapsed = if let Some(deciding) = status.deciding {
 					T::BlockNumberProvider::current_block_number().saturating_sub(deciding.since)
 				} else {
@@ -1104,7 +1084,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	) -> (ReferendumInfoOf<T, I>, bool, ServiceBranch) {
 		let mut dirty = false;
 		// Should it begin being decided?
-		let track = match Self::track(status.track) {
+		let track = match T::Tracks::info(status.track) {
 			Some(x) => x,
 			None => return (ReferendumInfo::Ongoing(status), false, ServiceBranch::Fail),
 		};
@@ -1140,7 +1120,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 						let prepare_end = status.submitted.saturating_add(track.prepare_period);
 						if now >= prepare_end {
 							let (maybe_alarm, branch) =
-								Self::ready_for_deciding(now, track, index, &mut status);
+								Self::ready_for_deciding(now, &track, index, &mut status);
 							if let Some(set_alarm) = maybe_alarm {
 								alarm = alarm.min(set_alarm);
 							}
@@ -1187,7 +1167,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 							Self::ensure_no_alarm(&mut status);
 							Self::note_one_fewer_deciding(status.track);
 							let (desired, call) = (status.enactment, status.proposal);
-							Self::schedule_enactment(index, track, desired, status.origin, call);
+							Self::schedule_enactment(index, &track, desired, status.origin, call);
 							Self::deposit_event(Event::<T, I>::Confirmed {
 								index,
 								tally: status.tally,
@@ -1237,7 +1217,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 						ServiceBranch::ContinueNotConfirming
 					}
 				};
-				alarm = Self::decision_time(deciding, &status.tally, status.track, track);
+				alarm = Self::decision_time(deciding, &status.tally, status.track, &track);
 			},
 		}
 
@@ -1303,13 +1283,6 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		}
 	}
 
-	/// Get the track info value for the track `id`.
-	fn track(id: TrackIdOf<T, I>) -> Option<&'static TrackInfoOf<T, I>> {
-		let tracks = T::Tracks::tracks();
-		let index = tracks.binary_search_by_key(&id, |x| x.0).unwrap_or_else(|x| x);
-		Some(&tracks[index].1)
-	}
-
 	/// Determine whether the given `tally` would result in a referendum passing at `elapsed` blocks
 	/// into a total decision `period`, given the two curves for `support_needed` and
 	/// `approval_needed`.
@@ -1378,7 +1351,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 			match referendum {
 				ReferendumInfo::Ongoing(status) => {
 					ensure!(
-						Self::track(status.track).is_some(),
+						T::Tracks::info(status.track).is_some(),
 						"No track info for the track of the referendum."
 					);
 
@@ -1404,8 +1377,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	///  [`ReferendumInfoFor`] storage map.
 	#[cfg(any(feature = "try-runtime", test))]
 	fn try_state_tracks() -> Result<(), sp_runtime::TryRuntimeError> {
-		T::Tracks::tracks().iter().try_for_each(|track| {
-			TrackQueue::<T, I>::get(track.0).iter().try_for_each(
+		T::Tracks::tracks().try_for_each(|track| {
+			TrackQueue::<T, I>::get(track.id).iter().try_for_each(
 				|(referendum_index, _)| -> Result<(), sp_runtime::TryRuntimeError> {
 					ensure!(
 					ReferendumInfoFor::<T, I>::contains_key(referendum_index),
diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs
index 10e5f35bbabf1035ba41f8fa5567ecf6a410b010..52a89d3f7cb7d2e7f11d2578a13819aabb3e309b 100644
--- a/substrate/frame/referenda/src/mock.rs
+++ b/substrate/frame/referenda/src/mock.rs
@@ -18,8 +18,9 @@
 //! The crate's tests.
 
 use super::*;
-use crate as pallet_referenda;
-use codec::{Decode, Encode, MaxEncodedLen};
+use crate::{self as pallet_referenda, types::Track};
+use alloc::borrow::Cow;
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
 	traits::{
@@ -29,6 +30,7 @@ use frame_support::{
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_runtime::{
+	str_array as s,
 	traits::{BlakeTwo256, Hash},
 	BuildStorage, DispatchResult, Perbill,
 };
@@ -103,12 +105,13 @@ pub struct TestTracksInfo;
 impl TracksInfo<u64, u64> for TestTracksInfo {
 	type Id = u8;
 	type RuntimeOrigin = <RuntimeOrigin as OriginTrait>::PalletsOrigin;
-	fn tracks() -> &'static [(Self::Id, TrackInfo<u64, u64>)] {
-		static DATA: [(u8, TrackInfo<u64, u64>); 3] = [
-			(
-				0u8,
-				TrackInfo {
-					name: "root",
+
+	fn tracks() -> impl Iterator<Item = Cow<'static, Track<Self::Id, u64, u64>>> {
+		static DATA: [Track<u8, u64, u64>; 3] = [
+			Track {
+				id: 0u8,
+				info: TrackInfo {
+					name: s("root"),
 					max_deciding: 1,
 					decision_deposit: 10,
 					prepare_period: 4,
@@ -126,11 +129,11 @@ impl TracksInfo<u64, u64> for TestTracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				1u8,
-				TrackInfo {
-					name: "none",
+			},
+			Track {
+				id: 1u8,
+				info: TrackInfo {
+					name: s("none"),
 					max_deciding: 3,
 					decision_deposit: 1,
 					prepare_period: 2,
@@ -148,11 +151,11 @@ impl TracksInfo<u64, u64> for TestTracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
-			(
-				2u8,
-				TrackInfo {
-					name: "none",
+			},
+			Track {
+				id: 2u8,
+				info: TrackInfo {
+					name: s("none"),
 					max_deciding: 3,
 					decision_deposit: 1,
 					prepare_period: 2,
@@ -170,9 +173,9 @@ impl TracksInfo<u64, u64> for TestTracksInfo {
 						ceil: Perbill::from_percent(100),
 					},
 				},
-			),
+			},
 		];
-		&DATA[..]
+		DATA.iter().map(Cow::Borrowed)
 	}
 	fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 		if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) {
@@ -187,7 +190,6 @@ impl TracksInfo<u64, u64> for TestTracksInfo {
 		}
 	}
 }
-impl_tracksinfo_get!(TestTracksInfo, u64, u64);
 
 impl Config for Test {
 	type WeightInfo = ();
@@ -237,7 +239,9 @@ impl ExtBuilder {
 	}
 }
 
-#[derive(Encode, Debug, Decode, TypeInfo, Eq, PartialEq, Clone, MaxEncodedLen)]
+#[derive(
+	Encode, Debug, Decode, DecodeWithMemTracking, TypeInfo, Eq, PartialEq, Clone, MaxEncodedLen,
+)]
 pub struct Tally {
 	pub ayes: u32,
 	pub nays: u32,
diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs
index 9f851992496cbede493eafb722d86360f6d034e3..d556d10c44a6baff0a5b364c1fb20e69691f76ab 100644
--- a/substrate/frame/referenda/src/tests.rs
+++ b/substrate/frame/referenda/src/tests.rs
@@ -289,11 +289,12 @@ fn alarm_interval_works() {
 fn decision_time_is_correct() {
 	ExtBuilder::default().build_and_execute(|| {
 		let decision_time = |since: u64| {
+			let track = TestTracksInfo::tracks().next().unwrap();
 			Pallet::<Test>::decision_time(
 				&DecidingStatus { since: since.into(), confirming: None },
 				&Tally { ayes: 100, nays: 5 },
-				TestTracksInfo::tracks()[0].0,
-				&TestTracksInfo::tracks()[0].1,
+				track.id,
+				&track.info,
 			)
 		};
 
diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs
index e97e7cc8df6d880ddc6aa2631878e715ba97344a..6a1eb8e82e4e3bf6d516c6c731fc30c4cc39db74 100644
--- a/substrate/frame/referenda/src/types.rs
+++ b/substrate/frame/referenda/src/types.rs
@@ -18,6 +18,7 @@
 //! Miscellaneous additional datatypes.
 
 use super::*;
+use alloc::borrow::Cow;
 use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
 use core::fmt::Debug;
 use frame_support::{
@@ -115,10 +116,13 @@ pub struct Deposit<AccountId, Balance> {
 	pub amount: Balance,
 }
 
-#[derive(Clone, Encode, TypeInfo)]
-pub struct TrackInfo<Balance, Moment> {
+pub const DEFAULT_MAX_TRACK_NAME_LEN: usize = 25;
+
+/// Detailed information about the configuration of a referenda track
+#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Debug)]
+pub struct TrackInfo<Balance, Moment, const N: usize = DEFAULT_MAX_TRACK_NAME_LEN> {
 	/// Name of this track.
-	pub name: &'static str,
+	pub name: [u8; N],
 	/// A limit for the number of referenda on this track that can be being decided at once.
 	/// For Root origin this should generally be just one.
 	pub max_deciding: u32,
@@ -140,42 +144,67 @@ pub struct TrackInfo<Balance, Moment> {
 	pub min_support: Curve,
 }
 
+/// Track groups the information of a voting track with its corresponding identifier
+#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Debug)]
+pub struct Track<Id, Balance, Moment, const N: usize = DEFAULT_MAX_TRACK_NAME_LEN> {
+	pub id: Id,
+	pub info: TrackInfo<Balance, Moment, N>,
+}
+
 /// Information on the voting tracks.
-pub trait TracksInfo<Balance, Moment> {
+pub trait TracksInfo<Balance, Moment, const N: usize = DEFAULT_MAX_TRACK_NAME_LEN>
+where
+	Balance: Clone + Debug + Eq + 'static,
+	Moment: Clone + Debug + Eq + 'static,
+{
 	/// The identifier for a track.
 	type Id: Copy + Parameter + Ord + PartialOrd + Send + Sync + 'static + MaxEncodedLen;
 
 	/// The origin type from which a track is implied.
 	type RuntimeOrigin;
 
-	/// Sorted array of known tracks and their information.
+	/// Return the sorted iterable list of known tracks and their information.
 	///
-	/// The array MUST be sorted by `Id`. Consumers of this trait are advised to assert
+	/// The iterator MUST be sorted by `Id`. Consumers of this trait are advised to assert
 	/// [`Self::check_integrity`] prior to any use.
-	fn tracks() -> &'static [(Self::Id, TrackInfo<Balance, Moment>)];
+	fn tracks() -> impl Iterator<Item = Cow<'static, Track<Self::Id, Balance, Moment, N>>>;
 
 	/// Determine the voting track for the given `origin`.
 	fn track_for(origin: &Self::RuntimeOrigin) -> Result<Self::Id, ()>;
 
-	/// Return the track info for track `id`, by default this just looks it up in `Self::tracks()`.
-	fn info(id: Self::Id) -> Option<&'static TrackInfo<Balance, Moment>> {
-		let tracks = Self::tracks();
-		let maybe_index = tracks.binary_search_by_key(&id, |t| t.0).ok()?;
+	/// Return the list of identifiers of the known tracks.
+	fn track_ids() -> impl Iterator<Item = Self::Id> {
+		Self::tracks().map(|x| x.id)
+	}
 
-		tracks.get(maybe_index).map(|(_, info)| info)
+	/// Return the track info for track `id`, by default this just looks it up in `Self::tracks()`.
+	fn info(id: Self::Id) -> Option<Cow<'static, TrackInfo<Balance, Moment, N>>> {
+		Self::tracks().find(|x| x.id == id).map(|t| match t {
+			Cow::Borrowed(x) => Cow::Borrowed(&x.info),
+			Cow::Owned(x) => Cow::Owned(x.info),
+		})
 	}
 
 	/// Check assumptions about the static data that this trait provides.
-	fn check_integrity() -> Result<(), &'static str>
-	where
-		Balance: 'static,
-		Moment: 'static,
-	{
-		if Self::tracks().windows(2).all(|w| w[0].0 < w[1].0) {
-			Ok(())
-		} else {
-			Err("The tracks that were returned by `tracks` were not sorted by `Id`")
-		}
+	fn check_integrity() -> Result<(), &'static str> {
+		use core::cmp::Ordering;
+		// Adapted from Iterator::is_sorted implementation available in nightly
+		// https://github.com/rust-lang/rust/issues/53485
+		let mut iter = Self::tracks();
+		let mut last = match iter.next() {
+			Some(ref e) => e.id,
+			None => return Ok(()),
+		};
+		iter.all(|curr| {
+			let curr = curr.as_ref().id;
+			if let Ordering::Greater = last.cmp(&curr) {
+				return false;
+			}
+			last = curr;
+			true
+		})
+		.then_some(())
+		.ok_or("The tracks that were returned by `tracks` were not sorted by `Id`")
 	}
 }
 
@@ -551,7 +580,7 @@ impl Debug for Curve {
 mod tests {
 	use super::*;
 	use frame_support::traits::ConstU32;
-	use sp_runtime::PerThing;
+	use sp_runtime::{str_array as s, PerThing};
 
 	const fn percent(x: u128) -> FixedI64 {
 		FixedI64::from_rational(x, 100)
@@ -703,12 +732,12 @@ mod tests {
 		impl TracksInfo<u64, u64> for BadTracksInfo {
 			type Id = u8;
 			type RuntimeOrigin = <RuntimeOrigin as OriginTrait>::PalletsOrigin;
-			fn tracks() -> &'static [(Self::Id, TrackInfo<u64, u64>)] {
-				static DATA: [(u8, TrackInfo<u64, u64>); 2] = [
-					(
-						1u8,
-						TrackInfo {
-							name: "root",
+			fn tracks() -> impl Iterator<Item = Cow<'static, Track<Self::Id, u64, u64>>> {
+				static DATA: [Track<u8, u64, u64>; 2] = [
+					Track {
+						id: 1u8,
+						info: TrackInfo {
+							name: s("root"),
 							max_deciding: 1,
 							decision_deposit: 10,
 							prepare_period: 4,
@@ -726,11 +755,11 @@ mod tests {
 								ceil: Perbill::from_percent(100),
 							},
 						},
-					),
-					(
-						0u8,
-						TrackInfo {
-							name: "none",
+					},
+					Track {
+						id: 0u8,
+						info: TrackInfo {
+							name: s("none"),
 							max_deciding: 3,
 							decision_deposit: 1,
 							prepare_period: 2,
@@ -748,9 +777,9 @@ mod tests {
 								ceil: Perbill::from_percent(100),
 							},
 						},
-					),
+					},
 				];
-				&DATA[..]
+				DATA.iter().map(Cow::Borrowed)
 			}
 			fn track_for(_: &Self::RuntimeOrigin) -> Result<Self::Id, ()> {
 				unimplemented!()
diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml
index b207a6041b9b6cf3230a713270c48d23eb5b4b8b..33d447e67a20369f2a1f18623a8a879bf654fce1 100644
--- a/substrate/frame/revive/rpc/Cargo.toml
+++ b/substrate/frame/revive/rpc/Cargo.toml
@@ -75,3 +75,6 @@ pretty_assertions = { workspace = true }
 static_init = { workspace = true }
 substrate-cli-test-utils = { workspace = true }
 subxt-signer = { workspace = true, features = ["unstable-eth"] }
+
+[build-dependencies]
+git2 = { version = "0.20.0", default-features = false }
diff --git a/substrate/frame/revive/rpc/build.rs b/substrate/frame/revive/rpc/build.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d2ea601211a00a6f0b44e5c4dc272f592cdb0a16
--- /dev/null
+++ b/substrate/frame/revive/rpc/build.rs
@@ -0,0 +1,44 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use std::process::Command;
+
+/// Get the current branch and commit hash.
+fn main() {
+	let output = Command::new("rustc")
+		.arg("--version")
+		.output()
+		.expect("cannot get the current rustc version");
+	// Exports the default rustc --version output:
+	// e.g. rustc 1.83.0 (90b35a623 2024-11-26)
+	// into the usual Ethereum web3_clientVersion format
+	// e.g. rustc1.83.0
+	let rustc_version = String::from_utf8_lossy(&output.stdout)
+		.split_whitespace()
+		.take(2)
+		.collect::<Vec<_>>()
+		.join("");
+	let target = std::env::var("TARGET").unwrap_or_else(|_| "unknown".to_string());
+
+	let repo = git2::Repository::open("../../../..").expect("should be a repository");
+	let head = repo.head().expect("should have head");
+	let commit = head.peel_to_commit().expect("should have commit");
+	let branch = head.shorthand().unwrap_or("unknown").to_string();
+	let id = &commit.id().to_string()[..7];
+	println!("cargo:rustc-env=GIT_REVISION={branch}-{id}");
+	println!("cargo:rustc-env=RUSTC_VERSION={rustc_version}");
+	println!("cargo:rustc-env=TARGET={target}");
+}
diff --git a/substrate/frame/revive/rpc/src/apis/execution_apis.rs b/substrate/frame/revive/rpc/src/apis/execution_apis.rs
index f55209fce585606a026430619ec9453c3163c234..b867e8acf30f06f764f4aea38c94048baa74ef9b 100644
--- a/substrate/frame/revive/rpc/src/apis/execution_apis.rs
+++ b/substrate/frame/revive/rpc/src/apis/execution_apis.rs
@@ -166,4 +166,8 @@ pub trait EthRpc {
 	/// The string value of current network id
 	#[method(name = "net_version")]
 	async fn net_version(&self) -> RpcResult<String>;
+
+	/// The string value of the current client version
+	#[method(name = "web3_clientVersion")]
+	async fn web3_client_version(&self) -> RpcResult<String>;
 }
diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs
index 5844d36a87fff654d6f0c1b2424c9977dcef1bfc..bf9a0e1ec115a67c983fdd54b1bf3e61f908b0c3 100644
--- a/substrate/frame/revive/rpc/src/cli.rs
+++ b/substrate/frame/revive/rpc/src/cli.rs
@@ -51,6 +51,10 @@ pub struct CliCommand {
 	#[clap(long, default_value = "256")]
 	pub cache_size: usize,
 
+	/// Earliest block number to consider when searching for transaction receipts.
+	#[clap(long)]
+	pub earliest_receipt_block: Option<SubstrateBlockNumber>,
+
 	/// The database used to store Ethereum transaction hashes.
 	/// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC
 	/// queries for transactions that are not in the in memory cache.
@@ -98,6 +102,7 @@ fn init_logger(params: &SharedParams) -> anyhow::Result<()> {
 fn build_client(
 	tokio_handle: &tokio::runtime::Handle,
 	cache_size: usize,
+	earliest_receipt_block: Option<SubstrateBlockNumber>,
 	node_rpc_url: &str,
 	database_url: &str,
 	abort_signal: Signals,
@@ -112,7 +117,10 @@ fn build_client(
 			log::info!( target: LOG_TARGET, "Using in-memory database, keeping only {cache_size} blocks in memory");
 		}
 
-		let receipt_extractor = ReceiptExtractor::new(native_to_eth_ratio(&api).await?);
+		let receipt_extractor = ReceiptExtractor::new(
+			native_to_eth_ratio(&api).await?,
+			earliest_receipt_block);
+
 		let receipt_provider: Arc<dyn ReceiptProvider> = Arc::new((
 			CacheReceiptProvider::default(),
 			DBReceiptProvider::new(
@@ -148,6 +156,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 		node_rpc_url,
 		cache_size,
 		database_url,
+		earliest_receipt_block,
 		index_until_block,
 		shared_params,
 		..
@@ -188,6 +197,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 	let client = build_client(
 		tokio_handle,
 		cache_size,
+		earliest_receipt_block,
 		&node_rpc_url,
 		&database_url,
 		tokio_runtime.block_on(async { Signals::capture() })?,
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index 8d6797722d4f2bf4eb954832692ca9ff11b1d62c..31af6a5bbb0debf9a3941e579971e70c46068c78 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -352,4 +352,11 @@ impl EthRpcServer for EthRpcServerImpl {
 		let nonce = self.client.nonce(address, block).await?;
 		Ok(nonce)
 	}
+
+	async fn web3_client_version(&self) -> RpcResult<String> {
+		let git_revision = env!("GIT_REVISION");
+		let rustc_version = env!("RUSTC_VERSION");
+		let target = env!("TARGET");
+		Ok(format!("eth-rpc/{git_revision}/{target}/{rustc_version}"))
+	}
 }
diff --git a/substrate/frame/revive/rpc/src/receipt_extractor.rs b/substrate/frame/revive/rpc/src/receipt_extractor.rs
index 6338f42ee0cc7a9e3a60097e0731811b07d412f5..37f470160521433a67c64b6b29ff034a60e07300 100644
--- a/substrate/frame/revive/rpc/src/receipt_extractor.rs
+++ b/substrate/frame/revive/rpc/src/receipt_extractor.rs
@@ -14,9 +14,8 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
 use crate::{
-	client::SubstrateBlock,
+	client::{SubstrateBlock, SubstrateBlockNumber},
 	subxt_client::{
 		revive::{calls::types::EthTransact, events::ContractEmitted},
 		system::events::ExtrinsicSuccess,
@@ -37,16 +36,22 @@ use sp_core::keccak_256;
 pub struct ReceiptExtractor {
 	/// The native to eth decimal ratio, used to calculated gas from native fees.
 	native_to_eth_ratio: u32,
+
+	/// Earliest block number to consider when searching for transaction receipts.
+	earliest_receipt_block: Option<SubstrateBlockNumber>,
 }
 
 impl ReceiptExtractor {
 	/// Create a new `ReceiptExtractor` with the given native to eth ratio.
-	pub fn new(native_to_eth_ratio: u32) -> Self {
-		Self { native_to_eth_ratio }
+	pub fn new(
+		native_to_eth_ratio: u32,
+		earliest_receipt_block: Option<SubstrateBlockNumber>,
+	) -> Self {
+		Self { native_to_eth_ratio, earliest_receipt_block }
 	}
 
 	/// Extract a [`TransactionSigned`] and a [`ReceiptInfo`] and  from an extrinsic.
-	pub async fn extract_from_extrinsic(
+	async fn extract_from_extrinsic(
 		&self,
 		block: &SubstrateBlock,
 		ext: subxt::blocks::ExtrinsicDetails<SrcChainConfig, subxt::OnlineClient<SrcChainConfig>>,
@@ -139,6 +144,13 @@ impl ReceiptExtractor {
 		&self,
 		block: &SubstrateBlock,
 	) -> Result<Vec<(TransactionSigned, ReceiptInfo)>, ClientError> {
+		if let Some(earliest_receipt_block) = self.earliest_receipt_block {
+			if block.number() < earliest_receipt_block {
+				log::trace!(target: LOG_TARGET, "Block number {block_number} is less than earliest receipt block {earliest_receipt_block}. Skipping.", block_number = block.number(), earliest_receipt_block = earliest_receipt_block);
+				return Ok(vec![]);
+			}
+		}
+
 		// Filter extrinsics from pallet_revive
 		let extrinsics = block.extrinsics().await.inspect_err(|err| {
 			log::debug!(target: LOG_TARGET, "Error fetching for #{:?} extrinsics: {err:?}", block.number());
diff --git a/substrate/frame/revive/rpc/src/receipt_provider/db.rs b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
index c471d009022ab99568357f3d82ed313339963ef4..2a20eaa411f299991744aa81f4212dfb0f42ab23 100644
--- a/substrate/frame/revive/rpc/src/receipt_provider/db.rs
+++ b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
@@ -410,7 +410,7 @@ mod tests {
 		DBReceiptProvider {
 			pool,
 			block_provider: Arc::new(MockBlockInfoProvider {}),
-			receipt_extractor: ReceiptExtractor::new(1_000_000),
+			receipt_extractor: ReceiptExtractor::new(1_000_000, None),
 			prune_old_blocks: true,
 		}
 	}
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index d7e16dea4564361b0dfa21ab1cea61ea08004e17..a422d266427d274848872653ca64b975fe622658 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -56,6 +56,9 @@ use sp_runtime::{
 	DispatchError, SaturatedConversion,
 };
 
+#[cfg(test)]
+mod tests;
+
 pub type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
 pub type MomentOf<T> = <<T as Config>::Time as Time>::Moment;
 pub type ExecResult = Result<ExecReturnValue, ExecError>;
@@ -1833,3004 +1836,3 @@ mod sealing {
 
 	impl<'a, T: Config, E> Sealed for Stack<'a, T, E> {}
 }
-
-/// These tests exercise the executive layer.
-///
-/// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use simple
-/// closures. This allows you to tackle executive logic more thoroughly without writing a
-/// wasm VM code.
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::{
-		exec::ExportedFunction::*,
-		gas::GasMeter,
-		test_utils::*,
-		tests::{
-			test_utils::{get_balance, place_contract, set_balance},
-			ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter,
-		},
-		AddressMapper, Error,
-	};
-	use assert_matches::assert_matches;
-	use frame_support::{assert_err, assert_noop, assert_ok, parameter_types};
-	use frame_system::{AccountInfo, EventRecord, Phase};
-	use pallet_revive_uapi::ReturnFlags;
-	use pretty_assertions::assert_eq;
-	use sp_io::hashing::keccak_256;
-	use sp_runtime::{traits::Hash, DispatchError};
-	use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc};
-
-	type System = frame_system::Pallet<Test>;
-
-	type MockStack<'a> = Stack<'a, Test, MockExecutable>;
-
-	parameter_types! {
-		static Loader: MockLoader = MockLoader::default();
-	}
-
-	fn events() -> Vec<Event<Test>> {
-		System::events()
-			.into_iter()
-			.filter_map(|meta| match meta.event {
-				MetaEvent::Contracts(contract_event) => Some(contract_event),
-				_ => None,
-			})
-			.collect()
-	}
-
-	struct MockCtx<'a> {
-		ext: &'a mut MockStack<'a>,
-		input_data: Vec<u8>,
-	}
-
-	#[derive(Clone)]
-	struct MockExecutable {
-		func: Rc<dyn for<'a> Fn(MockCtx<'a>, &Self) -> ExecResult + 'static>,
-		constructor: Rc<dyn for<'a> Fn(MockCtx<'a>, &Self) -> ExecResult + 'static>,
-		code_hash: H256,
-		code_info: CodeInfo<Test>,
-	}
-
-	#[derive(Default, Clone)]
-	pub struct MockLoader {
-		map: HashMap<H256, MockExecutable>,
-		counter: u64,
-	}
-
-	impl MockLoader {
-		fn code_hashes() -> Vec<H256> {
-			Loader::get().map.keys().copied().collect()
-		}
-
-		fn insert(
-			func_type: ExportedFunction,
-			f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
-		) -> H256 {
-			Loader::mutate(|loader| {
-				// Generate code hashes from contract index value.
-				let hash = H256(keccak_256(&loader.counter.to_le_bytes()));
-				loader.counter += 1;
-				if func_type == ExportedFunction::Constructor {
-					loader.map.insert(
-						hash,
-						MockExecutable {
-							func: Rc::new(|_, _| exec_success()),
-							constructor: Rc::new(f),
-							code_hash: hash,
-							code_info: CodeInfo::<Test>::new(ALICE),
-						},
-					);
-				} else {
-					loader.map.insert(
-						hash,
-						MockExecutable {
-							func: Rc::new(f),
-							constructor: Rc::new(|_, _| exec_success()),
-							code_hash: hash,
-							code_info: CodeInfo::<Test>::new(ALICE),
-						},
-					);
-				}
-				hash
-			})
-		}
-
-		fn insert_both(
-			constructor: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
-			call: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
-		) -> H256 {
-			Loader::mutate(|loader| {
-				// Generate code hashes from contract index value.
-				let hash = H256(keccak_256(&loader.counter.to_le_bytes()));
-				loader.counter += 1;
-				loader.map.insert(
-					hash,
-					MockExecutable {
-						func: Rc::new(call),
-						constructor: Rc::new(constructor),
-						code_hash: hash,
-						code_info: CodeInfo::<Test>::new(ALICE),
-					},
-				);
-				hash
-			})
-		}
-	}
-
-	impl Executable<Test> for MockExecutable {
-		fn from_storage(
-			code_hash: H256,
-			_gas_meter: &mut GasMeter<Test>,
-		) -> Result<Self, DispatchError> {
-			Loader::mutate(|loader| {
-				loader.map.get(&code_hash).cloned().ok_or(Error::<Test>::CodeNotFound.into())
-			})
-		}
-
-		fn execute<E: Ext<T = Test>>(
-			self,
-			ext: &mut E,
-			function: ExportedFunction,
-			input_data: Vec<u8>,
-		) -> ExecResult {
-			// # Safety
-			//
-			// We know that we **always** call execute with a `MockStack` in this test.
-			//
-			// # Note
-			//
-			// The transmute is necessary because `execute` has to be generic over all
-			// `E: Ext`. However, `MockExecutable` can't be generic over `E` as it would
-			// constitute a cycle.
-			let ext = unsafe { mem::transmute(ext) };
-			if function == ExportedFunction::Constructor {
-				(self.constructor)(MockCtx { ext, input_data }, &self)
-			} else {
-				(self.func)(MockCtx { ext, input_data }, &self)
-			}
-		}
-
-		fn code(&self) -> &[u8] {
-			// The mock executable doesn't have code", so we return the code hash.
-			self.code_hash.as_ref()
-		}
-
-		fn code_hash(&self) -> &H256 {
-			&self.code_hash
-		}
-
-		fn code_info(&self) -> &CodeInfo<Test> {
-			&self.code_info
-		}
-	}
-
-	fn exec_success() -> ExecResult {
-		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-	}
-
-	fn exec_trapped() -> ExecResult {
-		Err(ExecError { error: <Error<Test>>::ContractTrapped.into(), origin: ErrorOrigin::Callee })
-	}
-
-	#[test]
-	fn it_works() {
-		parameter_types! {
-			static TestData: Vec<usize> = vec![0];
-		}
-
-		let value = Default::default();
-		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-		let exec_ch = MockLoader::insert(Call, |_ctx, _executable| {
-			TestData::mutate(|data| data.push(1));
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, exec_ch);
-			let mut storage_meter =
-				storage::meter::Meter::new(&Origin::from_account_id(ALICE), 0, value).unwrap();
-
-			assert_matches!(
-				MockStack::run_call(
-					Origin::from_account_id(ALICE),
-					BOB_ADDR,
-					&mut gas_meter,
-					&mut storage_meter,
-					value.into(),
-					vec![],
-					false,
-				),
-				Ok(_)
-			);
-		});
-
-		assert_eq!(TestData::get(), vec![0, 1]);
-	}
-
-	#[test]
-	fn transfer_works() {
-		// This test verifies that a contract is able to transfer
-		// some funds to another account.
-		ExtBuilder::default().build().execute_with(|| {
-			set_balance(&ALICE, 100);
-			set_balance(&BOB, 0);
-
-			let origin = Origin::from_account_id(ALICE);
-			MockStack::transfer(&origin, &ALICE, &BOB, 55u64.into()).unwrap();
-
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-			assert_eq!(get_balance(&ALICE), 45 - min_balance);
-			assert_eq!(get_balance(&BOB), 55 + min_balance);
-		});
-	}
-
-	#[test]
-	fn transfer_to_nonexistent_account_works() {
-		// This test verifies that a contract is able to transfer
-		// some funds to a nonexistant account and that those transfers
-		// are not able to reap accounts.
-		ExtBuilder::default().build().execute_with(|| {
-			let ed = <Test as Config>::Currency::minimum_balance();
-			let value = 1024;
-
-			// Transfers to nonexistant accounts should work
-			set_balance(&ALICE, ed * 2);
-			set_balance(&BOB, ed + value);
-
-			assert_ok!(MockStack::transfer(
-				&Origin::from_account_id(ALICE),
-				&BOB,
-				&CHARLIE,
-				value.into()
-			));
-			assert_eq!(get_balance(&ALICE), ed);
-			assert_eq!(get_balance(&BOB), ed);
-			assert_eq!(get_balance(&CHARLIE), ed + value);
-
-			// Do not reap the origin account
-			set_balance(&ALICE, ed);
-			set_balance(&BOB, ed + value);
-			assert_err!(
-				MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value.into()),
-				<Error<Test>>::TransferFailed
-			);
-
-			// Do not reap the sender account
-			set_balance(&ALICE, ed * 2);
-			set_balance(&BOB, value);
-			assert_err!(
-				MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &EVE, value.into()),
-				<Error<Test>>::TransferFailed
-			);
-			// The ED transfer would work. But it should only be executed with the actual transfer
-			assert!(!System::account_exists(&EVE));
-		});
-	}
-
-	#[test]
-	fn correct_transfer_on_call() {
-		let value = 55;
-
-		let success_ch = MockLoader::insert(Call, move |ctx, _| {
-			assert_eq!(ctx.ext.value_transferred(), U256::from(value));
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, success_ch);
-			set_balance(&ALICE, 100);
-			let balance = get_balance(&BOB_FALLBACK);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap();
-
-			let _ = MockStack::run_call(
-				origin.clone(),
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				value.into(),
-				vec![],
-				false,
-			)
-			.unwrap();
-
-			assert_eq!(get_balance(&ALICE), 100 - value);
-			assert_eq!(get_balance(&BOB_FALLBACK), balance + value);
-		});
-	}
-
-	#[test]
-	fn correct_transfer_on_delegate_call() {
-		let value = 35;
-
-		let success_ch = MockLoader::insert(Call, move |ctx, _| {
-			assert_eq!(ctx.ext.value_transferred(), U256::from(value));
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-		});
-
-		let delegate_ch = MockLoader::insert(Call, move |ctx, _| {
-			assert_eq!(ctx.ext.value_transferred(), U256::from(value));
-			let _ =
-				ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?;
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, delegate_ch);
-			place_contract(&CHARLIE, success_ch);
-			set_balance(&ALICE, 100);
-			let balance = get_balance(&BOB_FALLBACK);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
-
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				value.into(),
-				vec![],
-				false,
-			));
-
-			assert_eq!(get_balance(&ALICE), 100 - value);
-			assert_eq!(get_balance(&BOB_FALLBACK), balance + value);
-		});
-	}
-
-	#[test]
-	fn delegate_call_missing_contract() {
-		let missing_ch = MockLoader::insert(Call, move |_ctx, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-		});
-
-		let delegate_ch = MockLoader::insert(Call, move |ctx, _| {
-			let _ =
-				ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?;
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, delegate_ch);
-			set_balance(&ALICE, 100);
-
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
-
-			// contract code missing
-			assert_noop!(
-				MockStack::run_call(
-					origin.clone(),
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				),
-				ExecError {
-					error: Error::<Test>::CodeNotFound.into(),
-					origin: ErrorOrigin::Callee,
-				}
-			);
-
-			// add missing contract code
-			place_contract(&CHARLIE, missing_ch);
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn changes_are_reverted_on_failing_call() {
-		// This test verifies that changes are reverted on a call which fails (or equally, returns
-		// a non-zero status code).
-
-		let return_ch = MockLoader::insert(Call, |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, return_ch);
-			set_balance(&ALICE, 100);
-			let balance = get_balance(&BOB);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
-
-			let output = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				55u64.into(),
-				vec![],
-				false,
-			)
-			.unwrap();
-
-			assert!(output.did_revert());
-			assert_eq!(get_balance(&ALICE), 100);
-			assert_eq!(get_balance(&BOB), balance);
-		});
-	}
-
-	#[test]
-	fn balance_too_low() {
-		// This test verifies that a contract can't send value if it's
-		// balance is too low.
-		let from = ALICE;
-		let origin = Origin::from_account_id(ALICE);
-		let dest = BOB;
-
-		ExtBuilder::default().build().execute_with(|| {
-			set_balance(&from, 0);
-
-			let result = MockStack::transfer(&origin, &from, &dest, 100u64.into());
-
-			assert_eq!(result, Err(Error::<Test>::TransferFailed.into()));
-			assert_eq!(get_balance(&from), 0);
-			assert_eq!(get_balance(&dest), 0);
-		});
-	}
-
-	#[test]
-	fn output_is_returned_on_success() {
-		// Verifies that if a contract returns data with a successful exit status, this data
-		// is returned from the execution context.
-		let return_ch = MockLoader::insert(Call, |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			place_contract(&BOB, return_ch);
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-
-			let output = result.unwrap();
-			assert!(!output.did_revert());
-			assert_eq!(output.data, vec![1, 2, 3, 4]);
-		});
-	}
-
-	#[test]
-	fn output_is_returned_on_failure() {
-		// Verifies that if a contract returns data with a failing exit status, this data
-		// is returned from the execution context.
-		let return_ch = MockLoader::insert(Call, |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, return_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-
-			let output = result.unwrap();
-			assert!(output.did_revert());
-			assert_eq!(output.data, vec![1, 2, 3, 4]);
-		});
-	}
-
-	#[test]
-	fn input_data_to_call() {
-		let input_data_ch = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(ctx.input_data, &[1, 2, 3, 4]);
-			exec_success()
-		});
-
-		// This one tests passing the input data into a contract via call.
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, input_data_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![1, 2, 3, 4],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn input_data_to_instantiate() {
-		let input_data_ch = MockLoader::insert(Constructor, |ctx, _| {
-			assert_eq!(ctx.input_data, &[1, 2, 3, 4]);
-			exec_success()
-		});
-
-		// This one tests passing the input data into a contract via instantiate.
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let executable =
-					MockExecutable::from_storage(input_data_ch, &mut gas_meter).unwrap();
-				set_balance(&ALICE, min_balance * 10_000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance)
-						.unwrap();
-
-				let result = MockStack::run_instantiate(
-					ALICE,
-					executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					min_balance.into(),
-					vec![1, 2, 3, 4],
-					Some(&[0; 32]),
-					false,
-				);
-				assert_matches!(result, Ok(_));
-			});
-	}
-
-	#[test]
-	fn max_depth() {
-		// This test verifies that when we reach the maximal depth creation of an
-		// yet another context fails.
-		parameter_types! {
-			static ReachedBottom: bool = false;
-		}
-		let value = Default::default();
-		let recurse_ch = MockLoader::insert(Call, |ctx, _| {
-			// Try to call into yourself.
-			let r = ctx.ext.call(
-				Weight::zero(),
-				U256::zero(),
-				&BOB_ADDR,
-				U256::zero(),
-				vec![],
-				true,
-				false,
-			);
-
-			ReachedBottom::mutate(|reached_bottom| {
-				if !*reached_bottom {
-					// We are first time here, it means we just reached bottom.
-					// Verify that we've got proper error and set `reached_bottom`.
-					assert_eq!(r, Err(Error::<Test>::MaxCallDepthReached.into()));
-					*reached_bottom = true;
-				} else {
-					// We just unwinding stack here.
-					assert_matches!(r, Ok(_));
-				}
-			});
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			set_balance(&BOB, 1);
-			place_contract(&BOB, recurse_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				value.into(),
-				vec![],
-				false,
-			);
-
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn caller_returns_proper_values() {
-		parameter_types! {
-			static WitnessedCallerBob: Option<H160> = None;
-			static WitnessedCallerCharlie: Option<H160> = None;
-		}
-
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			// Record the caller for bob.
-			WitnessedCallerBob::mutate(|caller| {
-				let origin = ctx.ext.caller();
-				*caller =
-					Some(<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(
-						&origin.account_id().unwrap(),
-					));
-			});
-
-			// Call into CHARLIE contract.
-			assert_matches!(
-				ctx.ext.call(
-					Weight::zero(),
-					U256::zero(),
-					&CHARLIE_ADDR,
-					U256::zero(),
-					vec![],
-					true,
-					false
-				),
-				Ok(_)
-			);
-			exec_success()
-		});
-		let charlie_ch = MockLoader::insert(Call, |ctx, _| {
-			// Record the caller for charlie.
-			WitnessedCallerCharlie::mutate(|caller| {
-				let origin = ctx.ext.caller();
-				*caller =
-					Some(<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(
-						&origin.account_id().unwrap(),
-					));
-			});
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-			place_contract(&CHARLIE, charlie_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-
-			assert_matches!(result, Ok(_));
-		});
-
-		assert_eq!(WitnessedCallerBob::get(), Some(ALICE_ADDR));
-		assert_eq!(WitnessedCallerCharlie::get(), Some(BOB_ADDR));
-	}
-
-	#[test]
-	fn origin_returns_proper_values() {
-		parameter_types! {
-			static WitnessedCallerBob: Option<H160> = None;
-			static WitnessedCallerCharlie: Option<H160> = None;
-		}
-
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			// Record the origin for bob.
-			WitnessedCallerBob::mutate(|witness| {
-				let origin = ctx.ext.origin();
-				*witness = Some(<Test as Config>::AddressMapper::to_address(
-					&origin.account_id().unwrap(),
-				));
-			});
-
-			// Call into CHARLIE contract.
-			assert_matches!(
-				ctx.ext.call(
-					Weight::zero(),
-					U256::zero(),
-					&CHARLIE_ADDR,
-					U256::zero(),
-					vec![],
-					true,
-					false
-				),
-				Ok(_)
-			);
-			exec_success()
-		});
-		let charlie_ch = MockLoader::insert(Call, |ctx, _| {
-			// Record the origin for charlie.
-			WitnessedCallerCharlie::mutate(|witness| {
-				let origin = ctx.ext.origin();
-				*witness = Some(<Test as Config>::AddressMapper::to_address(
-					&origin.account_id().unwrap(),
-				));
-			});
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-			place_contract(&CHARLIE, charlie_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-
-			assert_matches!(result, Ok(_));
-		});
-
-		assert_eq!(WitnessedCallerBob::get(), Some(ALICE_ADDR));
-		assert_eq!(WitnessedCallerCharlie::get(), Some(ALICE_ADDR));
-	}
-
-	#[test]
-	fn is_contract_returns_proper_values() {
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			// Verify that BOB is a contract
-			assert!(ctx.ext.is_contract(&BOB_ADDR));
-			// Verify that ALICE is not a contract
-			assert!(!ctx.ext.is_contract(&ALICE_ADDR));
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn to_account_id_returns_proper_values() {
-		let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
-			let alice_account_id = <Test as Config>::AddressMapper::to_account_id(&ALICE_ADDR);
-			assert_eq!(ctx.ext.to_account_id(&ALICE_ADDR), alice_account_id);
-
-			const UNMAPPED_ADDR: H160 = H160([99u8; 20]);
-			let mut unmapped_fallback_account_id = [0xEE; 32];
-			unmapped_fallback_account_id[..20].copy_from_slice(UNMAPPED_ADDR.as_bytes());
-			assert_eq!(
-				ctx.ext.to_account_id(&UNMAPPED_ADDR),
-				AccountId32::new(unmapped_fallback_account_id)
-			);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn code_hash_returns_proper_values() {
-		let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
-			// ALICE is not a contract but account exists so it returns hash of empty data
-			assert_eq!(ctx.ext.code_hash(&ALICE_ADDR), EMPTY_CODE_HASH);
-			// BOB is a contract (this function) and hence it has a code_hash.
-			// `MockLoader` uses contract index to generate the code hash.
-			assert_eq!(ctx.ext.code_hash(&BOB_ADDR), H256(keccak_256(&0u64.to_le_bytes())));
-			// [0xff;20] doesn't exist and returns hash zero
-			assert!(ctx.ext.code_hash(&H160([0xff; 20])).is_zero());
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			// add alice account info to test case EOA code hash
-			frame_system::Account::<Test>::insert(
-				<Test as Config>::AddressMapper::to_account_id(&ALICE_ADDR),
-				AccountInfo { consumers: 1, providers: 1, ..Default::default() },
-			);
-			place_contract(&BOB, bob_code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// ALICE (not contract) -> BOB (contract)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn own_code_hash_returns_proper_values() {
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			let code_hash = ctx.ext.code_hash(&BOB_ADDR);
-			assert_eq!(*ctx.ext.own_code_hash(), code_hash);
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// ALICE (not contract) -> BOB (contract)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn caller_is_origin_returns_proper_values() {
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			// BOB is not the origin of the stack call
-			assert!(!ctx.ext.caller_is_origin());
-			exec_success()
-		});
-
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			// ALICE is the origin of the call stack
-			assert!(ctx.ext.caller_is_origin());
-			// BOB calls CHARLIE
-			ctx.ext
-				.call(
-					Weight::zero(),
-					U256::zero(),
-					&CHARLIE_ADDR,
-					U256::zero(),
-					vec![],
-					true,
-					false,
-				)
-				.map(|_| ctx.ext.last_frame_output().clone())
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// ALICE -> BOB (caller is origin) -> CHARLIE (caller is not origin)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn root_caller_succeeds() {
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			// root is the origin of the call stack.
-			assert!(ctx.ext.caller_is_root());
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			let origin = Origin::Root;
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// root -> BOB (caller is root)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn root_caller_does_not_succeed_when_value_not_zero() {
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			// root is the origin of the call stack.
-			assert!(ctx.ext.caller_is_root());
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			let origin = Origin::Root;
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// root -> BOB (caller is root)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				1u64.into(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Err(_));
-		});
-	}
-
-	#[test]
-	fn root_caller_succeeds_with_consecutive_calls() {
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			// BOB is not root, even though the origin is root.
-			assert!(!ctx.ext.caller_is_root());
-			exec_success()
-		});
-
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			// root is the origin of the call stack.
-			assert!(ctx.ext.caller_is_root());
-			// BOB calls CHARLIE.
-			ctx.ext
-				.call(
-					Weight::zero(),
-					U256::zero(),
-					&CHARLIE_ADDR,
-					U256::zero(),
-					vec![],
-					true,
-					false,
-				)
-				.map(|_| ctx.ext.last_frame_output().clone())
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::Root;
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			// root -> BOB (caller is root) -> CHARLIE (caller is not root)
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn address_returns_proper_values() {
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			// Verify that address matches BOB.
-			assert_eq!(ctx.ext.address(), BOB_ADDR);
-
-			// Call into charlie contract.
-			assert_matches!(
-				ctx.ext.call(
-					Weight::zero(),
-					U256::zero(),
-					&CHARLIE_ADDR,
-					U256::zero(),
-					vec![],
-					true,
-					false
-				),
-				Ok(_)
-			);
-			exec_success()
-		});
-		let charlie_ch = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(ctx.ext.address(), CHARLIE_ADDR);
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-			place_contract(&CHARLIE, charlie_ch);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn refuse_instantiate_with_value_below_existential_deposit() {
-		let dummy_ch = MockLoader::insert(Constructor, |_, _| exec_success());
-
-		ExtBuilder::default().existential_deposit(15).build().execute_with(|| {
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			assert_matches!(
-				MockStack::run_instantiate(
-					ALICE,
-					executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					U256::zero(), // <- zero value
-					vec![],
-					Some(&[0; 32]),
-					false,
-				),
-				Err(_)
-			);
-		});
-	}
-
-	#[test]
-	fn instantiation_work_with_success_output() {
-		let dummy_ch = MockLoader::insert(Constructor, |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] })
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
-				set_balance(&ALICE, min_balance * 1000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap();
-
-				let instantiated_contract_address = assert_matches!(
-					MockStack::run_instantiate(
-						ALICE,
-						executable,
-						&mut gas_meter,
-						&mut storage_meter,
-						min_balance.into(),
-						vec![],
-						Some(&[0 ;32]),
-						false,
-					),
-					Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address
-				);
-				let instantiated_contract_id = <<Test as Config>::AddressMapper as AddressMapper<
-					Test,
-				>>::to_fallback_account_id(
-					&instantiated_contract_address
-				);
-
-				// Check that the newly created account has the expected code hash and
-				// there are instantiation event.
-				assert_eq!(
-					ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
-					dummy_ch
-				);
-			});
-	}
-
-	#[test]
-	fn instantiation_fails_with_failing_output() {
-		let dummy_ch = MockLoader::insert(Constructor, |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] })
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
-				set_balance(&ALICE, min_balance * 1000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap();
-
-				let instantiated_contract_address = assert_matches!(
-					MockStack::run_instantiate(
-						ALICE,
-						executable,
-						&mut gas_meter,
-						&mut storage_meter,
-						min_balance.into(),
-						vec![],
-						Some(&[0; 32]),
-						false,
-					),
-					Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address
-				);
-
-				let instantiated_contract_id = <<Test as Config>::AddressMapper as AddressMapper<
-					Test,
-				>>::to_fallback_account_id(
-					&instantiated_contract_address
-				);
-
-				// Check that the account has not been created.
-				assert!(ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).is_none());
-				assert!(events().is_empty());
-			});
-	}
-
-	#[test]
-	fn instantiation_from_contract() {
-		let dummy_ch = MockLoader::insert(Call, |_, _| exec_success());
-		let instantiated_contract_address = Rc::new(RefCell::new(None::<H160>));
-		let instantiator_ch = MockLoader::insert(Call, {
-			let instantiated_contract_address = Rc::clone(&instantiated_contract_address);
-			move |ctx, _| {
-				// Instantiate a contract and save it's address in `instantiated_contract_address`.
-				let (address, output) = ctx
-					.ext
-					.instantiate(
-						Weight::MAX,
-						U256::MAX,
-						dummy_ch,
-						<Test as Config>::Currency::minimum_balance().into(),
-						vec![],
-						Some(&[48; 32]),
-					)
-					.map(|address| (address, ctx.ext.last_frame_output().clone()))
-					.unwrap();
-
-				*instantiated_contract_address.borrow_mut() = Some(address);
-				Ok(output)
-			}
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				set_balance(&ALICE, min_balance * 100);
-				place_contract(&BOB, instantiator_ch);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, min_balance * 10, min_balance * 10)
-						.unwrap();
-
-				assert_matches!(
-					MockStack::run_call(
-						origin,
-						BOB_ADDR,
-						&mut GasMeter::<Test>::new(GAS_LIMIT),
-						&mut storage_meter,
-						(min_balance * 10).into(),
-						vec![],
-						false,
-					),
-					Ok(_)
-				);
-
-				let instantiated_contract_address =
-					*instantiated_contract_address.borrow().as_ref().unwrap();
-
-				let instantiated_contract_id = <<Test as Config>::AddressMapper as AddressMapper<
-					Test,
-				>>::to_fallback_account_id(
-					&instantiated_contract_address
-				);
-
-				// Check that the newly created account has the expected code hash and
-				// there are instantiation event.
-				assert_eq!(
-					ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
-					dummy_ch
-				);
-			});
-	}
-
-	#[test]
-	fn instantiation_traps() {
-		let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into()));
-		let instantiator_ch = MockLoader::insert(Call, {
-			move |ctx, _| {
-				// Instantiate a contract and save it's address in `instantiated_contract_address`.
-				assert_matches!(
-					ctx.ext.instantiate(
-						Weight::zero(),
-						U256::zero(),
-						dummy_ch,
-						<Test as Config>::Currency::minimum_balance().into(),
-						vec![],
-						Some(&[0; 32]),
-					),
-					Err(ExecError {
-						error: DispatchError::Other("It's a trap!"),
-						origin: ErrorOrigin::Callee,
-					})
-				);
-
-				exec_success()
-			}
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				set_balance(&ALICE, 1000);
-				set_balance(&BOB_FALLBACK, 100);
-				place_contract(&BOB, instantiator_ch);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-
-				assert_matches!(
-					MockStack::run_call(
-						origin,
-						BOB_ADDR,
-						&mut GasMeter::<Test>::new(GAS_LIMIT),
-						&mut storage_meter,
-						U256::zero(),
-						vec![],
-						false,
-					),
-					Ok(_)
-				);
-			});
-	}
-
-	#[test]
-	fn termination_from_instantiate_fails() {
-		let terminate_ch = MockLoader::insert(Constructor, |ctx, _| {
-			ctx.ext.terminate(&ALICE_ADDR)?;
-			exec_success()
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let executable =
-					MockExecutable::from_storage(terminate_ch, &mut gas_meter).unwrap();
-				set_balance(&ALICE, 10_000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 100).unwrap();
-
-				assert_eq!(
-					MockStack::run_instantiate(
-						ALICE,
-						executable,
-						&mut gas_meter,
-						&mut storage_meter,
-						100u64.into(),
-						vec![],
-						Some(&[0; 32]),
-						false,
-					),
-					Err(ExecError {
-						error: Error::<Test>::TerminatedInConstructor.into(),
-						origin: ErrorOrigin::Callee
-					})
-				);
-
-				assert_eq!(&events(), &[]);
-			});
-	}
-
-	#[test]
-	fn in_memory_changes_not_discarded() {
-		// Call stack: BOB -> CHARLIE (trap) -> BOB' (success)
-		// This tests verifies some edge case of the contract info cache:
-		// We change some value in our contract info before calling into a contract
-		// that calls into ourself. This triggers a case where BOBs contract info
-		// is written to storage and invalidated by the successful execution of BOB'.
-		// The trap of CHARLIE reverts the storage changes to BOB. When the root BOB regains
-		// control it reloads its contract info from storage. We check that changes that
-		// are made before calling into CHARLIE are not discarded.
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			if ctx.input_data[0] == 0 {
-				let info = ctx.ext.contract_info();
-				assert_eq!(info.storage_byte_deposit, 0);
-				info.storage_byte_deposit = 42;
-				assert_eq!(
-					ctx.ext
-						.call(
-							Weight::zero(),
-							U256::zero(),
-							&CHARLIE_ADDR,
-							U256::zero(),
-							vec![],
-							true,
-							false
-						)
-						.map(|_| ctx.ext.last_frame_output().clone()),
-					exec_trapped()
-				);
-				assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42);
-			}
-			exec_success()
-		});
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			assert!(ctx
-				.ext
-				.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
-				.is_ok());
-			exec_trapped()
-		});
-
-		// This one tests passing the input data into a contract via call.
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn recursive_call_during_constructor_is_balance_transfer() {
-		let code = MockLoader::insert(Constructor, |ctx, _| {
-			let account_id = ctx.ext.account_id().clone();
-			let addr =
-				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(&account_id);
-			let balance = ctx.ext.balance();
-
-			// Calling ourselves during the constructor will trigger a balance
-			// transfer since no contract exist yet.
-			assert_ok!(ctx.ext.call(
-				Weight::zero(),
-				U256::zero(),
-				&addr,
-				(balance - 1).into(),
-				vec![],
-				true,
-				false
-			));
-
-			// Should also work with call data set as it is ignored when no
-			// contract is deployed.
-			assert_ok!(ctx.ext.call(
-				Weight::zero(),
-				U256::zero(),
-				&addr,
-				1u32.into(),
-				vec![1, 2, 3, 4],
-				true,
-				false
-			));
-			exec_success()
-		});
-
-		// This one tests passing the input data into a contract via instantiate.
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let executable = MockExecutable::from_storage(code, &mut gas_meter).unwrap();
-				set_balance(&ALICE, min_balance * 10_000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance)
-						.unwrap();
-
-				let result = MockStack::run_instantiate(
-					ALICE,
-					executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					10u64.into(),
-					vec![],
-					Some(&[0; 32]),
-					false,
-				);
-				assert_matches!(result, Ok(_));
-			});
-	}
-
-	#[test]
-	fn cannot_send_more_balance_than_available_to_self() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			let account_id = ctx.ext.account_id().clone();
-			let addr =
-				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(&account_id);
-			let balance = ctx.ext.balance();
-
-			assert_err!(
-				ctx.ext.call(
-					Weight::zero(),
-					U256::zero(),
-					&addr,
-					(balance + 1).into(),
-					vec![],
-					true,
-					false
-				),
-				<Error<Test>>::TransferFailed,
-			);
-			exec_success()
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				set_balance(&ALICE, min_balance * 10);
-				place_contract(&BOB, code_hash);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut gas_meter,
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap();
-			});
-	}
-
-	#[test]
-	fn call_reentry_direct_recursion() {
-		// call the contract passed as input with disabled reentry
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			let dest = H160::from_slice(ctx.input_data.as_ref());
-			ctx.ext
-				.call(Weight::zero(), U256::zero(), &dest, U256::zero(), vec![], false, false)
-				.map(|_| ctx.ext.last_frame_output().clone())
-		});
-
-		let code_charlie = MockLoader::insert(Call, |_, _| exec_success());
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			// Calling another contract should succeed
-			assert_ok!(MockStack::run_call(
-				origin.clone(),
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				CHARLIE_ADDR.as_bytes().to_vec(),
-				false,
-			));
-
-			// Calling into oneself fails
-			assert_err!(
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					BOB_ADDR.as_bytes().to_vec(),
-					false,
-				)
-				.map_err(|e| e.error),
-				<Error<Test>>::ReentranceDenied,
-			);
-		});
-	}
-
-	#[test]
-	fn call_deny_reentry() {
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			if ctx.input_data[0] == 0 {
-				ctx.ext
-					.call(
-						Weight::zero(),
-						U256::zero(),
-						&CHARLIE_ADDR,
-						U256::zero(),
-						vec![],
-						false,
-						false,
-					)
-					.map(|_| ctx.ext.last_frame_output().clone())
-			} else {
-				exec_success()
-			}
-		});
-
-		// call BOB with input set to '1'
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			ctx.ext
-				.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![1], true, false)
-				.map(|_| ctx.ext.last_frame_output().clone())
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			// BOB -> CHARLIE -> BOB fails as BOB denies reentry.
-			assert_err!(
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![0],
-					false,
-				)
-				.map_err(|e| e.error),
-				<Error<Test>>::ReentranceDenied,
-			);
-		});
-	}
-
-	#[test]
-	fn call_runtime_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			let call = RuntimeCall::System(frame_system::Call::remark_with_event {
-				remark: b"Hello World".to_vec(),
-			});
-			ctx.ext.call_runtime(call).unwrap();
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 10);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			System::reset_events();
-			MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			)
-			.unwrap();
-
-			let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello World");
-			assert_eq!(
-				System::events(),
-				vec![EventRecord {
-					phase: Phase::Initialization,
-					event: MetaEvent::System(frame_system::Event::Remarked {
-						sender: BOB_FALLBACK,
-						hash: remark_hash
-					}),
-					topics: vec![],
-				},]
-			);
-		});
-	}
-
-	#[test]
-	fn call_runtime_filter() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			use frame_system::Call as SysCall;
-			use pallet_balances::Call as BalanceCall;
-			use pallet_utility::Call as UtilCall;
-
-			// remark should still be allowed
-			let allowed_call =
-				RuntimeCall::System(SysCall::remark_with_event { remark: b"Hello".to_vec() });
-
-			// transfers are disallowed by the `TestFiler` (see below)
-			let forbidden_call = RuntimeCall::Balances(BalanceCall::transfer_allow_death {
-				dest: CHARLIE,
-				value: 22,
-			});
-
-			// simple cases: direct call
-			assert_err!(
-				ctx.ext.call_runtime(forbidden_call.clone()),
-				frame_system::Error::<Test>::CallFiltered
-			);
-
-			// as part of a patch: return is OK (but it interrupted the batch)
-			assert_ok!(ctx.ext.call_runtime(RuntimeCall::Utility(UtilCall::batch {
-				calls: vec![allowed_call.clone(), forbidden_call, allowed_call]
-			})),);
-
-			// the transfer wasn't performed
-			assert_eq!(get_balance(&CHARLIE), 0);
-
-			exec_success()
-		});
-
-		TestFilter::set_filter(|call| match call {
-			RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) => false,
-			_ => true,
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 10);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			System::reset_events();
-			MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			)
-			.unwrap();
-
-			let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello");
-			assert_eq!(
-				System::events(),
-				vec![
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::System(frame_system::Event::Remarked {
-							sender: BOB_FALLBACK,
-							hash: remark_hash
-						}),
-						topics: vec![],
-					},
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::Utility(pallet_utility::Event::ItemCompleted),
-						topics: vec![],
-					},
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted {
-							index: 1,
-							error: frame_system::Error::<Test>::CallFiltered.into()
-						},),
-						topics: vec![],
-					},
-				]
-			);
-		});
-	}
-
-	#[test]
-	fn nonce() {
-		let fail_code = MockLoader::insert(Constructor, |_, _| exec_trapped());
-		let success_code = MockLoader::insert(Constructor, |_, _| exec_success());
-		let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| {
-			ctx.ext
-				.instantiate(
-					Weight::MAX,
-					U256::MAX,
-					fail_code,
-					ctx.ext.minimum_balance() * 100,
-					vec![],
-					Some(&[0; 32]),
-				)
-				.ok();
-			exec_success()
-		});
-		let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| {
-			let alice_nonce = System::account_nonce(&ALICE);
-			assert_eq!(System::account_nonce(ctx.ext.account_id()), 0);
-			assert_eq!(ctx.ext.caller().account_id().unwrap(), &ALICE);
-			let addr = ctx
-				.ext
-				.instantiate(
-					Weight::MAX,
-					U256::MAX,
-					success_code,
-					ctx.ext.minimum_balance() * 100,
-					vec![],
-					Some(&[0; 32]),
-				)
-				.unwrap();
-
-			let account_id =
-				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_fallback_account_id(
-					&addr,
-				);
-
-			assert_eq!(System::account_nonce(&ALICE), alice_nonce);
-			assert_eq!(System::account_nonce(ctx.ext.account_id()), 1);
-			assert_eq!(System::account_nonce(&account_id), 0);
-
-			// a plain call should not influence the account counter
-			ctx.ext
-				.call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], false, false)
-				.unwrap();
-
-			assert_eq!(System::account_nonce(ALICE), alice_nonce);
-			assert_eq!(System::account_nonce(ctx.ext.account_id()), 1);
-			assert_eq!(System::account_nonce(&account_id), 0);
-
-			exec_success()
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.build()
-			.execute_with(|| {
-				let min_balance = <Test as Config>::Currency::minimum_balance();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-				let fail_executable =
-					MockExecutable::from_storage(fail_code, &mut gas_meter).unwrap();
-				let success_executable =
-					MockExecutable::from_storage(success_code, &mut gas_meter).unwrap();
-				let succ_fail_executable =
-					MockExecutable::from_storage(succ_fail_code, &mut gas_meter).unwrap();
-				let succ_succ_executable =
-					MockExecutable::from_storage(succ_succ_code, &mut gas_meter).unwrap();
-				set_balance(&ALICE, min_balance * 10_000);
-				set_balance(&BOB, min_balance * 10_000);
-				let origin = Origin::from_account_id(BOB);
-				let mut storage_meter =
-					storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance * 100)
-						.unwrap();
-
-				// fail should not increment
-				MockStack::run_instantiate(
-					ALICE,
-					fail_executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					(min_balance * 100).into(),
-					vec![],
-					Some(&[0; 32]),
-					false,
-				)
-				.ok();
-				assert_eq!(System::account_nonce(&ALICE), 0);
-
-				assert_ok!(MockStack::run_instantiate(
-					ALICE,
-					success_executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					(min_balance * 100).into(),
-					vec![],
-					Some(&[0; 32]),
-					false,
-				));
-				assert_eq!(System::account_nonce(&ALICE), 1);
-
-				assert_ok!(MockStack::run_instantiate(
-					ALICE,
-					succ_fail_executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					(min_balance * 200).into(),
-					vec![],
-					Some(&[0; 32]),
-					false,
-				));
-				assert_eq!(System::account_nonce(&ALICE), 2);
-
-				assert_ok!(MockStack::run_instantiate(
-					ALICE,
-					succ_succ_executable,
-					&mut gas_meter,
-					&mut storage_meter,
-					(min_balance * 200).into(),
-					vec![],
-					Some(&[0; 32]),
-					false,
-				));
-				assert_eq!(System::account_nonce(&ALICE), 3);
-			});
-	}
-
-	#[test]
-	fn set_storage_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			// Write
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New));
-			assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New));
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true),
-				Ok(WriteOutcome::New)
-			);
-
-			// Overwrite
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![42]), false),
-				Ok(WriteOutcome::Overwritten(3))
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![48]), true),
-				Ok(WriteOutcome::Taken(vec![4, 5, 6]))
-			);
-			assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New));
-			assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New));
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::Overwritten(0))
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true),
-				Ok(WriteOutcome::Taken(vec![]))
-			);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn set_storage_varsized_key_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			// Write
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([1; 64].to_vec()).unwrap(),
-					Some(vec![1, 2, 3]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([2; 19].to_vec()).unwrap(),
-					Some(vec![4, 5, 6]),
-					true
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::try_from_var([3; 19].to_vec()).unwrap(), None, false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::try_from_var([4; 64].to_vec()).unwrap(), None, true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([5; 30].to_vec()).unwrap(),
-					Some(vec![]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([6; 128].to_vec()).unwrap(),
-					Some(vec![]),
-					true
-				),
-				Ok(WriteOutcome::New)
-			);
-
-			// Overwrite
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([1; 64].to_vec()).unwrap(),
-					Some(vec![42, 43, 44]),
-					false
-				),
-				Ok(WriteOutcome::Overwritten(3))
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([2; 19].to_vec()).unwrap(),
-					Some(vec![48]),
-					true
-				),
-				Ok(WriteOutcome::Taken(vec![4, 5, 6]))
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::try_from_var([3; 19].to_vec()).unwrap(), None, false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::try_from_var([4; 64].to_vec()).unwrap(), None, true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([5; 30].to_vec()).unwrap(),
-					Some(vec![]),
-					false
-				),
-				Ok(WriteOutcome::Overwritten(0))
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([6; 128].to_vec()).unwrap(),
-					Some(vec![]),
-					true
-				),
-				Ok(WriteOutcome::Taken(vec![]))
-			);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn get_storage_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(ctx.ext.get_storage(&Key::Fix([1; 32])), Some(vec![1, 2, 3]));
-			assert_eq!(ctx.ext.get_storage(&Key::Fix([2; 32])), Some(vec![]));
-			assert_eq!(ctx.ext.get_storage(&Key::Fix([3; 32])), None);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn get_storage_size_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(ctx.ext.get_storage_size(&Key::Fix([1; 32])), Some(3));
-			assert_eq!(ctx.ext.get_storage_size(&Key::Fix([2; 32])), Some(0));
-			assert_eq!(ctx.ext.get_storage_size(&Key::Fix([3; 32])), None);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn get_storage_varsized_key_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([1; 19].to_vec()).unwrap(),
-					Some(vec![1, 2, 3]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([2; 16].to_vec()).unwrap(),
-					Some(vec![]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.get_storage(&Key::try_from_var([1; 19].to_vec()).unwrap()),
-				Some(vec![1, 2, 3])
-			);
-			assert_eq!(
-				ctx.ext.get_storage(&Key::try_from_var([2; 16].to_vec()).unwrap()),
-				Some(vec![])
-			);
-			assert_eq!(ctx.ext.get_storage(&Key::try_from_var([3; 8].to_vec()).unwrap()), None);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn get_storage_size_varsized_key_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([1; 19].to_vec()).unwrap(),
-					Some(vec![1, 2, 3]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_storage(
-					&Key::try_from_var([2; 16].to_vec()).unwrap(),
-					Some(vec![]),
-					false
-				),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.get_storage_size(&Key::try_from_var([1; 19].to_vec()).unwrap()),
-				Some(3)
-			);
-			assert_eq!(
-				ctx.ext.get_storage_size(&Key::try_from_var([2; 16].to_vec()).unwrap()),
-				Some(0)
-			);
-			assert_eq!(
-				ctx.ext.get_storage_size(&Key::try_from_var([3; 8].to_vec()).unwrap()),
-				None
-			);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 1000);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn set_transient_storage_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			// Write
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true),
-				Ok(WriteOutcome::New)
-			);
-
-			// Overwrite
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![42]), false),
-				Ok(WriteOutcome::Overwritten(3))
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![48]), true),
-				Ok(WriteOutcome::Taken(vec![4, 5, 6]))
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false),
-				Ok(WriteOutcome::Overwritten(0))
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true),
-				Ok(WriteOutcome::Taken(vec![]))
-			);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter =
-				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn get_transient_storage_works() {
-		// Call stack: BOB -> CHARLIE(success) -> BOB' (success)
-		let storage_key_1 = &Key::Fix([1; 32]);
-		let storage_key_2 = &Key::Fix([2; 32]);
-		let storage_key_3 = &Key::Fix([3; 32]);
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			if ctx.input_data[0] == 0 {
-				assert_eq!(
-					ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2]), false),
-					Ok(WriteOutcome::New)
-				);
-				assert_eq!(
-					ctx.ext
-						.call(
-							Weight::zero(),
-							U256::zero(),
-							&CHARLIE_ADDR,
-							U256::zero(),
-							vec![],
-							true,
-							false,
-						)
-						.map(|_| ctx.ext.last_frame_output().clone()),
-					exec_success()
-				);
-				assert_eq!(ctx.ext.get_transient_storage(storage_key_1), Some(vec![3]));
-				assert_eq!(ctx.ext.get_transient_storage(storage_key_2), Some(vec![]));
-				assert_eq!(ctx.ext.get_transient_storage(storage_key_3), None);
-			} else {
-				assert_eq!(
-					ctx.ext.set_transient_storage(storage_key_1, Some(vec![3]), true),
-					Ok(WriteOutcome::Taken(vec![1, 2]))
-				);
-				assert_eq!(
-					ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false),
-					Ok(WriteOutcome::New)
-				);
-			}
-			exec_success()
-		});
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			assert!(ctx
-				.ext
-				.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
-				.is_ok());
-			// CHARLIE can not read BOB`s storage.
-			assert_eq!(ctx.ext.get_transient_storage(storage_key_1), None);
-			exec_success()
-		});
-
-		// This one tests passing the input data into a contract via call.
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn get_transient_storage_size_works() {
-		let storage_key_1 = &Key::Fix([1; 32]);
-		let storage_key_2 = &Key::Fix([2; 32]);
-		let storage_key_3 = &Key::Fix([3; 32]);
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			assert_eq!(
-				ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2, 3]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(
-				ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false),
-				Ok(WriteOutcome::New)
-			);
-			assert_eq!(ctx.ext.get_transient_storage_size(storage_key_1), Some(3));
-			assert_eq!(ctx.ext.get_transient_storage_size(storage_key_2), Some(0));
-			assert_eq!(ctx.ext.get_transient_storage_size(storage_key_3), None);
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			assert_ok!(MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			));
-		});
-	}
-
-	#[test]
-	fn rollback_transient_storage_works() {
-		// Call stack: BOB -> CHARLIE (trap) -> BOB' (success)
-		let storage_key = &Key::Fix([1; 32]);
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			if ctx.input_data[0] == 0 {
-				assert_eq!(
-					ctx.ext.set_transient_storage(storage_key, Some(vec![1, 2]), false),
-					Ok(WriteOutcome::New)
-				);
-				assert_eq!(
-					ctx.ext
-						.call(
-							Weight::zero(),
-							U256::zero(),
-							&CHARLIE_ADDR,
-							U256::zero(),
-							vec![],
-							true,
-							false
-						)
-						.map(|_| ctx.ext.last_frame_output().clone()),
-					exec_trapped()
-				);
-				assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![1, 2]));
-			} else {
-				let overwritten_length = ctx.ext.get_transient_storage_size(storage_key).unwrap();
-				assert_eq!(
-					ctx.ext.set_transient_storage(storage_key, Some(vec![3]), false),
-					Ok(WriteOutcome::Overwritten(overwritten_length))
-				);
-				assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![3]));
-			}
-			exec_success()
-		});
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			assert!(ctx
-				.ext
-				.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
-				.is_ok());
-			exec_trapped()
-		});
-
-		// This one tests passing the input data into a contract via call.
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn ecdsa_to_eth_address_returns_proper_value() {
-		let bob_ch = MockLoader::insert(Call, |ctx, _| {
-			let pubkey_compressed = array_bytes::hex2array_unchecked(
-				"028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91",
-			);
-			assert_eq!(
-				ctx.ext.ecdsa_to_eth_address(&pubkey_compressed).unwrap(),
-				array_bytes::hex2array_unchecked::<_, 20>(
-					"09231da7b19A016f9e576d23B16277062F4d46A8"
-				)
-			);
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, bob_ch);
-
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn last_frame_output_works_on_instantiate() {
-		let ok_ch = MockLoader::insert(Constructor, move |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] })
-		});
-		let revert_ch = MockLoader::insert(Constructor, move |_, _| {
-			Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] })
-		});
-		let trap_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into()));
-		let instantiator_ch = MockLoader::insert(Call, {
-			move |ctx, _| {
-				let value = <Test as Config>::Currency::minimum_balance().into();
-
-				// Successful instantiation should set the output
-				let address = ctx
-					.ext
-					.instantiate(Weight::MAX, U256::MAX, ok_ch, value, vec![], None)
-					.unwrap();
-				assert_eq!(
-					ctx.ext.last_frame_output(),
-					&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }
-				);
-
-				// Balance transfers should reset the output
-				ctx.ext
-					.call(Weight::MAX, U256::MAX, &address, U256::from(1), vec![], true, false)
-					.unwrap();
-				assert_eq!(ctx.ext.last_frame_output(), &Default::default());
-
-				// Reverted instantiation should set the output
-				ctx.ext
-					.instantiate(Weight::zero(), U256::zero(), revert_ch, value, vec![], None)
-					.unwrap();
-				assert_eq!(
-					ctx.ext.last_frame_output(),
-					&ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }
-				);
-
-				// Trapped instantiation should clear the output
-				ctx.ext
-					.instantiate(Weight::zero(), U256::zero(), trap_ch, value, vec![], None)
-					.unwrap_err();
-				assert_eq!(
-					ctx.ext.last_frame_output(),
-					&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
-				);
-
-				exec_success()
-			}
-		});
-
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				set_balance(&ALICE, 1000);
-				set_balance(&BOB, 100);
-				place_contract(&BOB, instantiator_ch);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap()
-			});
-	}
-
-	#[test]
-	fn last_frame_output_works_on_nested_call() {
-		// Call stack: BOB -> CHARLIE(revert) -> BOB' (success)
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			if ctx.input_data.is_empty() {
-				// We didn't do anything yet
-				assert_eq!(
-					ctx.ext.last_frame_output(),
-					&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
-				);
-
-				ctx.ext
-					.call(
-						Weight::zero(),
-						U256::zero(),
-						&CHARLIE_ADDR,
-						U256::zero(),
-						vec![],
-						true,
-						false,
-					)
-					.unwrap();
-				assert_eq!(
-					ctx.ext.last_frame_output(),
-					&ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }
-				);
-			}
-
-			Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] })
-		});
-		let code_charlie = MockLoader::insert(Call, |ctx, _| {
-			// We didn't do anything yet
-			assert_eq!(
-				ctx.ext.last_frame_output(),
-				&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
-			);
-
-			assert!(ctx
-				.ext
-				.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
-				.is_ok());
-			assert_eq!(
-				ctx.ext.last_frame_output(),
-				&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }
-			);
-
-			Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] })
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			place_contract(&CHARLIE, code_charlie);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![0],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn last_frame_output_is_always_reset() {
-		let code_bob = MockLoader::insert(Call, |ctx, _| {
-			let invalid_code_hash = H256::from_low_u64_le(u64::MAX);
-			let output_revert = || ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1] };
-
-			// A value of u256::MAX to fail the call on the first condition.
-			*ctx.ext.last_frame_output_mut() = output_revert();
-			assert_eq!(
-				ctx.ext.call(
-					Weight::zero(),
-					U256::zero(),
-					&H160::zero(),
-					U256::max_value(),
-					vec![],
-					true,
-					false,
-				),
-				Err(Error::<Test>::BalanceConversionFailed.into())
-			);
-			assert_eq!(ctx.ext.last_frame_output(), &Default::default());
-
-			// An unknown code hash to fail the delegate_call on the first condition.
-			*ctx.ext.last_frame_output_mut() = output_revert();
-			assert_eq!(
-				ctx.ext.delegate_call(
-					Weight::zero(),
-					U256::zero(),
-					H160([0xff; 20]),
-					Default::default()
-				),
-				Err(Error::<Test>::CodeNotFound.into())
-			);
-			assert_eq!(ctx.ext.last_frame_output(), &Default::default());
-
-			// An unknown code hash to fail instantiation on the first condition.
-			*ctx.ext.last_frame_output_mut() = output_revert();
-			assert_eq!(
-				ctx.ext.instantiate(
-					Weight::zero(),
-					U256::zero(),
-					invalid_code_hash,
-					U256::zero(),
-					vec![],
-					None,
-				),
-				Err(Error::<Test>::CodeNotFound.into())
-			);
-			assert_eq!(ctx.ext.last_frame_output(), &Default::default());
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			place_contract(&BOB, code_bob);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut GasMeter::<Test>::new(GAS_LIMIT),
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-			);
-			assert_matches!(result, Ok(_));
-		});
-	}
-
-	#[test]
-	fn immutable_data_access_checks_work() {
-		let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| {
-			// Calls can not store immutable data
-			assert_eq!(
-				ctx.ext.get_immutable_data(),
-				Err(Error::<Test>::InvalidImmutableAccess.into())
-			);
-			exec_success()
-		});
-		let instantiator_ch = MockLoader::insert(Call, {
-			move |ctx, _| {
-				let value = <Test as Config>::Currency::minimum_balance().into();
-
-				assert_eq!(
-					ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()),
-					Err(Error::<Test>::InvalidImmutableAccess.into())
-				);
-
-				// Constructors can not access the immutable data
-				ctx.ext
-					.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
-					.unwrap();
-
-				exec_success()
-			}
-		});
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				set_balance(&ALICE, 1000);
-				set_balance(&BOB, 100);
-				place_contract(&BOB, instantiator_ch);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap()
-			});
-	}
-
-	#[test]
-	fn correct_immutable_data_in_delegate_call() {
-		let charlie_ch = MockLoader::insert(Call, |ctx, _| {
-			Ok(ExecReturnValue {
-				flags: ReturnFlags::empty(),
-				data: ctx.ext.get_immutable_data()?.to_vec(),
-			})
-		});
-		let bob_ch = MockLoader::insert(Call, move |ctx, _| {
-			// In a regular call, we should witness the callee immutable data
-			assert_eq!(
-				ctx.ext
-					.call(
-						Weight::zero(),
-						U256::zero(),
-						&CHARLIE_ADDR,
-						U256::zero(),
-						vec![],
-						true,
-						false,
-					)
-					.map(|_| ctx.ext.last_frame_output().data.clone()),
-				Ok(vec![2]),
-			);
-
-			// Also in a delegate call, we should witness the callee immutable data
-			assert_eq!(
-				ctx.ext
-					.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())
-					.map(|_| ctx.ext.last_frame_output().data.clone()),
-				Ok(vec![2])
-			);
-
-			exec_success()
-		});
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				place_contract(&BOB, bob_ch);
-				place_contract(&CHARLIE, charlie_ch);
-
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-
-				// Place unique immutable data for each contract
-				<ImmutableDataOf<Test>>::insert::<_, ImmutableData>(
-					BOB_ADDR,
-					vec![1].try_into().unwrap(),
-				);
-				<ImmutableDataOf<Test>>::insert::<_, ImmutableData>(
-					CHARLIE_ADDR,
-					vec![2].try_into().unwrap(),
-				);
-
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap()
-			});
-	}
-
-	#[test]
-	fn immutable_data_set_overrides() {
-		let hash = MockLoader::insert_both(
-			move |ctx, _| {
-				// Calling `set_immutable_data` the first time should work
-				assert_ok!(ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()));
-				// Calling `set_immutable_data` the second time overrides the original one
-				assert_ok!(ctx.ext.set_immutable_data(vec![7, 5].try_into().unwrap()));
-				exec_success()
-			},
-			move |ctx, _| {
-				assert_eq!(ctx.ext.get_immutable_data().unwrap().into_inner(), vec![7, 5]);
-				exec_success()
-			},
-		);
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				set_balance(&ALICE, 1000);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-				let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-
-				let addr = MockStack::run_instantiate(
-					ALICE,
-					MockExecutable::from_storage(hash, &mut gas_meter).unwrap(),
-					&mut gas_meter,
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					None,
-					false,
-				)
-				.unwrap()
-				.0;
-
-				MockStack::run_call(
-					origin,
-					addr,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap()
-			});
-	}
-
-	#[test]
-	fn immutable_data_set_errors_with_empty_data() {
-		let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| {
-			// Calling `set_immutable_data` with empty data should error out
-			assert_eq!(
-				ctx.ext.set_immutable_data(Default::default()),
-				Err(Error::<Test>::InvalidImmutableAccess.into())
-			);
-			exec_success()
-		});
-		let instantiator_ch = MockLoader::insert(Call, {
-			move |ctx, _| {
-				let value = <Test as Config>::Currency::minimum_balance().into();
-				ctx.ext
-					.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
-					.unwrap();
-
-				exec_success()
-			}
-		});
-		ExtBuilder::default()
-			.with_code_hashes(MockLoader::code_hashes())
-			.existential_deposit(15)
-			.build()
-			.execute_with(|| {
-				set_balance(&ALICE, 1000);
-				set_balance(&BOB, 100);
-				place_contract(&BOB, instantiator_ch);
-				let origin = Origin::from_account_id(ALICE);
-				let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
-
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![],
-					false,
-				)
-				.unwrap()
-			});
-	}
-
-	#[test]
-	fn block_hash_returns_proper_values() {
-		let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
-			ctx.ext.block_number = 1u32.into();
-			assert_eq!(ctx.ext.block_hash(U256::from(1)), None);
-			assert_eq!(ctx.ext.block_hash(U256::from(0)), Some(H256::from([1; 32])));
-
-			ctx.ext.block_number = 300u32.into();
-			assert_eq!(ctx.ext.block_hash(U256::from(300)), None);
-			assert_eq!(ctx.ext.block_hash(U256::from(43)), None);
-			assert_eq!(ctx.ext.block_hash(U256::from(44)), Some(H256::from([2; 32])));
-
-			exec_success()
-		});
-
-		ExtBuilder::default().build().execute_with(|| {
-			frame_system::BlockHash::<Test>::insert(
-				&BlockNumberFor::<Test>::from(0u32),
-				<tests::Test as frame_system::Config>::Hash::from([1; 32]),
-			);
-			frame_system::BlockHash::<Test>::insert(
-				&BlockNumberFor::<Test>::from(1u32),
-				<tests::Test as frame_system::Config>::Hash::default(),
-			);
-			frame_system::BlockHash::<Test>::insert(
-				&BlockNumberFor::<Test>::from(43u32),
-				<tests::Test as frame_system::Config>::Hash::default(),
-			);
-			frame_system::BlockHash::<Test>::insert(
-				&BlockNumberFor::<Test>::from(44u32),
-				<tests::Test as frame_system::Config>::Hash::from([2; 32]),
-			);
-			frame_system::BlockHash::<Test>::insert(
-				&BlockNumberFor::<Test>::from(300u32),
-				<tests::Test as frame_system::Config>::Hash::default(),
-			);
-
-			place_contract(&BOB, bob_code_hash);
-
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			assert_matches!(
-				MockStack::run_call(
-					origin,
-					BOB_ADDR,
-					&mut GasMeter::<Test>::new(GAS_LIMIT),
-					&mut storage_meter,
-					U256::zero(),
-					vec![0],
-					false,
-				),
-				Ok(_)
-			);
-		});
-	}
-}
diff --git a/substrate/frame/revive/src/exec/tests.rs b/substrate/frame/revive/src/exec/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2511715654c171ea5de097571ba581fb409b5068
--- /dev/null
+++ b/substrate/frame/revive/src/exec/tests.rs
@@ -0,0 +1,2949 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// These tests exercise the executive layer.
+///
+/// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use
+/// simple closures. This allows you to tackle executive logic more thoroughly without writing
+/// a wasm VM code.
+#[cfg(test)]
+use super::*;
+use crate::{
+	exec::ExportedFunction::*,
+	gas::GasMeter,
+	test_utils::*,
+	tests::{
+		test_utils::{get_balance, place_contract, set_balance},
+		ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter,
+	},
+	AddressMapper, Error,
+};
+use assert_matches::assert_matches;
+use frame_support::{assert_err, assert_noop, assert_ok, parameter_types};
+use frame_system::{AccountInfo, EventRecord, Phase};
+use pallet_revive_uapi::ReturnFlags;
+use pretty_assertions::assert_eq;
+use sp_io::hashing::keccak_256;
+use sp_runtime::{traits::Hash, DispatchError};
+use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc};
+
+type System = frame_system::Pallet<Test>;
+
+type MockStack<'a> = Stack<'a, Test, MockExecutable>;
+
+parameter_types! {
+	static Loader: MockLoader = MockLoader::default();
+}
+
+fn events() -> Vec<Event<Test>> {
+	System::events()
+		.into_iter()
+		.filter_map(|meta| match meta.event {
+			MetaEvent::Contracts(contract_event) => Some(contract_event),
+			_ => None,
+		})
+		.collect()
+}
+
+struct MockCtx<'a> {
+	ext: &'a mut MockStack<'a>,
+	input_data: Vec<u8>,
+}
+
+#[derive(Clone)]
+struct MockExecutable {
+	func: Rc<dyn for<'a> Fn(MockCtx<'a>, &Self) -> ExecResult + 'static>,
+	constructor: Rc<dyn for<'a> Fn(MockCtx<'a>, &Self) -> ExecResult + 'static>,
+	code_hash: H256,
+	code_info: CodeInfo<Test>,
+}
+
+#[derive(Default, Clone)]
+pub struct MockLoader {
+	map: HashMap<H256, MockExecutable>,
+	counter: u64,
+}
+
+impl MockLoader {
+	fn code_hashes() -> Vec<H256> {
+		Loader::get().map.keys().copied().collect()
+	}
+
+	fn insert(
+		func_type: ExportedFunction,
+		f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
+	) -> H256 {
+		Loader::mutate(|loader| {
+			// Generate code hashes from contract index value.
+			let hash = H256(keccak_256(&loader.counter.to_le_bytes()));
+			loader.counter += 1;
+			if func_type == ExportedFunction::Constructor {
+				loader.map.insert(
+					hash,
+					MockExecutable {
+						func: Rc::new(|_, _| exec_success()),
+						constructor: Rc::new(f),
+						code_hash: hash,
+						code_info: CodeInfo::<Test>::new(ALICE),
+					},
+				);
+			} else {
+				loader.map.insert(
+					hash,
+					MockExecutable {
+						func: Rc::new(f),
+						constructor: Rc::new(|_, _| exec_success()),
+						code_hash: hash,
+						code_info: CodeInfo::<Test>::new(ALICE),
+					},
+				);
+			}
+			hash
+		})
+	}
+
+	fn insert_both(
+		constructor: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
+		call: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static,
+	) -> H256 {
+		Loader::mutate(|loader| {
+			// Generate code hashes from contract index value.
+			let hash = H256(keccak_256(&loader.counter.to_le_bytes()));
+			loader.counter += 1;
+			loader.map.insert(
+				hash,
+				MockExecutable {
+					func: Rc::new(call),
+					constructor: Rc::new(constructor),
+					code_hash: hash,
+					code_info: CodeInfo::<Test>::new(ALICE),
+				},
+			);
+			hash
+		})
+	}
+}
+
+impl Executable<Test> for MockExecutable {
+	fn from_storage(
+		code_hash: H256,
+		_gas_meter: &mut GasMeter<Test>,
+	) -> Result<Self, DispatchError> {
+		Loader::mutate(|loader| {
+			loader.map.get(&code_hash).cloned().ok_or(Error::<Test>::CodeNotFound.into())
+		})
+	}
+
+	fn execute<E: Ext<T = Test>>(
+		self,
+		ext: &mut E,
+		function: ExportedFunction,
+		input_data: Vec<u8>,
+	) -> ExecResult {
+		// # Safety
+		//
+		// We know that we **always** call execute with a `MockStack` in this test.
+		//
+		// # Note
+		//
+		// The transmute is necessary because `execute` has to be generic over all
+		// `E: Ext`. However, `MockExecutable` can't be generic over `E` as it would
+		// constitute a cycle.
+		let ext = unsafe { mem::transmute(ext) };
+		if function == ExportedFunction::Constructor {
+			(self.constructor)(MockCtx { ext, input_data }, &self)
+		} else {
+			(self.func)(MockCtx { ext, input_data }, &self)
+		}
+	}
+
+	fn code(&self) -> &[u8] {
+		// The mock executable doesn't have code", so we return the code hash.
+		self.code_hash.as_ref()
+	}
+
+	fn code_hash(&self) -> &H256 {
+		&self.code_hash
+	}
+
+	fn code_info(&self) -> &CodeInfo<Test> {
+		&self.code_info
+	}
+}
+
+fn exec_success() -> ExecResult {
+	Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+}
+
+fn exec_trapped() -> ExecResult {
+	Err(ExecError { error: <Error<Test>>::ContractTrapped.into(), origin: ErrorOrigin::Callee })
+}
+
+#[test]
+fn it_works() {
+	parameter_types! {
+		static TestData: Vec<usize> = vec![0];
+	}
+
+	let value = Default::default();
+	let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+	let exec_ch = MockLoader::insert(Call, |_ctx, _executable| {
+		TestData::mutate(|data| data.push(1));
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, exec_ch);
+		let mut storage_meter =
+			storage::meter::Meter::new(&Origin::from_account_id(ALICE), 0, value).unwrap();
+
+		assert_matches!(
+			MockStack::run_call(
+				Origin::from_account_id(ALICE),
+				BOB_ADDR,
+				&mut gas_meter,
+				&mut storage_meter,
+				value.into(),
+				vec![],
+				false,
+			),
+			Ok(_)
+		);
+	});
+
+	assert_eq!(TestData::get(), vec![0, 1]);
+}
+
+#[test]
+fn transfer_works() {
+	// This test verifies that a contract is able to transfer
+	// some funds to another account.
+	ExtBuilder::default().build().execute_with(|| {
+		set_balance(&ALICE, 100);
+		set_balance(&BOB, 0);
+
+		let origin = Origin::from_account_id(ALICE);
+		MockStack::transfer(&origin, &ALICE, &BOB, 55u64.into()).unwrap();
+
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+		assert_eq!(get_balance(&ALICE), 45 - min_balance);
+		assert_eq!(get_balance(&BOB), 55 + min_balance);
+	});
+}
+
+#[test]
+fn transfer_to_nonexistent_account_works() {
+	// This test verifies that a contract is able to transfer
+	// some funds to a nonexistant account and that those transfers
+	// are not able to reap accounts.
+	ExtBuilder::default().build().execute_with(|| {
+		let ed = <Test as Config>::Currency::minimum_balance();
+		let value = 1024;
+
+		// Transfers to nonexistant accounts should work
+		set_balance(&ALICE, ed * 2);
+		set_balance(&BOB, ed + value);
+
+		assert_ok!(MockStack::transfer(
+			&Origin::from_account_id(ALICE),
+			&BOB,
+			&CHARLIE,
+			value.into()
+		));
+		assert_eq!(get_balance(&ALICE), ed);
+		assert_eq!(get_balance(&BOB), ed);
+		assert_eq!(get_balance(&CHARLIE), ed + value);
+
+		// Do not reap the origin account
+		set_balance(&ALICE, ed);
+		set_balance(&BOB, ed + value);
+		assert_err!(
+			MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value.into()),
+			<Error<Test>>::TransferFailed
+		);
+
+		// Do not reap the sender account
+		set_balance(&ALICE, ed * 2);
+		set_balance(&BOB, value);
+		assert_err!(
+			MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &EVE, value.into()),
+			<Error<Test>>::TransferFailed
+		);
+		// The ED transfer would work. But it should only be executed with the actual transfer
+		assert!(!System::account_exists(&EVE));
+	});
+}
+
+#[test]
+fn correct_transfer_on_call() {
+	let value = 55;
+
+	let success_ch = MockLoader::insert(Call, move |ctx, _| {
+		assert_eq!(ctx.ext.value_transferred(), U256::from(value));
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, success_ch);
+		set_balance(&ALICE, 100);
+		let balance = get_balance(&BOB_FALLBACK);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap();
+
+		let _ = MockStack::run_call(
+			origin.clone(),
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			value.into(),
+			vec![],
+			false,
+		)
+		.unwrap();
+
+		assert_eq!(get_balance(&ALICE), 100 - value);
+		assert_eq!(get_balance(&BOB_FALLBACK), balance + value);
+	});
+}
+
+#[test]
+fn correct_transfer_on_delegate_call() {
+	let value = 35;
+
+	let success_ch = MockLoader::insert(Call, move |ctx, _| {
+		assert_eq!(ctx.ext.value_transferred(), U256::from(value));
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+	});
+
+	let delegate_ch = MockLoader::insert(Call, move |ctx, _| {
+		assert_eq!(ctx.ext.value_transferred(), U256::from(value));
+		let _ = ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?;
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, delegate_ch);
+		place_contract(&CHARLIE, success_ch);
+		set_balance(&ALICE, 100);
+		let balance = get_balance(&BOB_FALLBACK);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
+
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			value.into(),
+			vec![],
+			false,
+		));
+
+		assert_eq!(get_balance(&ALICE), 100 - value);
+		assert_eq!(get_balance(&BOB_FALLBACK), balance + value);
+	});
+}
+
+#[test]
+fn delegate_call_missing_contract() {
+	// Delegate calling into an address with no code must fail with
+	// `CodeNotFound` attributed to the callee; once the code is placed,
+	// the very same call succeeds.
+	let missing_ch = MockLoader::insert(Call, move |_ctx, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+	});
+
+	let delegate_ch = MockLoader::insert(Call, move |ctx, _| {
+		let _ = ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?;
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, delegate_ch);
+		set_balance(&ALICE, 100);
+
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
+
+		// contract code missing
+		assert_noop!(
+			MockStack::run_call(
+				origin.clone(),
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			),
+			ExecError { error: Error::<Test>::CodeNotFound.into(), origin: ErrorOrigin::Callee }
+		);
+
+		// add missing contract code
+		place_contract(&CHARLIE, missing_ch);
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn changes_are_reverted_on_failing_call() {
+	// This test verifies that changes are reverted on a call which fails (or equally, returns
+	// a non-zero status code).
+
+	// The contract returns successfully but sets the REVERT flag.
+	let return_ch = MockLoader::insert(Call, |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, return_ch);
+		set_balance(&ALICE, 100);
+		let balance = get_balance(&BOB);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap();
+
+		let output = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			55u64.into(),
+			vec![],
+			false,
+		)
+		.unwrap();
+
+		// The call reverted, so the value transfer of 55 was rolled back:
+		// both balances are unchanged.
+		assert!(output.did_revert());
+		assert_eq!(get_balance(&ALICE), 100);
+		assert_eq!(get_balance(&BOB), balance);
+	});
+}
+
+#[test]
+fn balance_too_low() {
+	// This test verifies that a contract can't send value if its
+	// balance is too low.
+	let from = ALICE;
+	let origin = Origin::from_account_id(ALICE);
+	let dest = BOB;
+
+	ExtBuilder::default().build().execute_with(|| {
+		// ALICE has nothing, so transferring 100 must fail without
+		// touching either balance.
+		set_balance(&from, 0);
+
+		let result = MockStack::transfer(&origin, &from, &dest, 100u64.into());
+
+		assert_eq!(result, Err(Error::<Test>::TransferFailed.into()));
+		assert_eq!(get_balance(&from), 0);
+		assert_eq!(get_balance(&dest), 0);
+	});
+}
+
+#[test]
+fn output_is_returned_on_success() {
+	// Verifies that if a contract returns data with a successful exit status, this data
+	// is returned from the execution context.
+	let return_ch = MockLoader::insert(Call, |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		place_contract(&BOB, return_ch);
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+
+		// Success: no revert flag, output data is passed through verbatim.
+		let output = result.unwrap();
+		assert!(!output.did_revert());
+		assert_eq!(output.data, vec![1, 2, 3, 4]);
+	});
+}
+
+#[test]
+fn output_is_returned_on_failure() {
+	// Verifies that if a contract returns data with a failing exit status, this data
+	// is returned from the execution context.
+	let return_ch = MockLoader::insert(Call, |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, return_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+
+		// Even though the call reverted, the returned data is still
+		// delivered to the caller.
+		let output = result.unwrap();
+		assert!(output.did_revert());
+		assert_eq!(output.data, vec![1, 2, 3, 4]);
+	});
+}
+
+#[test]
+fn input_data_to_call() {
+	// The mock contract asserts that it receives exactly the bytes the
+	// caller passed in.
+	let input_data_ch = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(ctx.input_data, &[1, 2, 3, 4]);
+		exec_success()
+	});
+
+	// This one tests passing the input data into a contract via call.
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, input_data_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![1, 2, 3, 4],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn input_data_to_instantiate() {
+	// The mock constructor asserts that it receives exactly the bytes the
+	// instantiator passed in.
+	let input_data_ch = MockLoader::insert(Constructor, |ctx, _| {
+		assert_eq!(ctx.input_data, &[1, 2, 3, 4]);
+		exec_success()
+	});
+
+	// This one tests passing the input data into a contract via instantiate.
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let executable = MockExecutable::from_storage(input_data_ch, &mut gas_meter).unwrap();
+			set_balance(&ALICE, min_balance * 10_000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance).unwrap();
+
+			let result = MockStack::run_instantiate(
+				ALICE,
+				executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				min_balance.into(),
+				vec![1, 2, 3, 4],
+				Some(&[0; 32]),
+				false,
+			);
+			assert_matches!(result, Ok(_));
+		});
+}
+
+#[test]
+fn max_depth() {
+	// This test verifies that when we reach the maximal depth, creating
+	// yet another context fails.
+	parameter_types! {
+		static ReachedBottom: bool = false;
+	}
+	let value = Default::default();
+	let recurse_ch = MockLoader::insert(Call, |ctx, _| {
+		// Try to call into yourself.
+		let r = ctx.ext.call(
+			Weight::zero(),
+			U256::zero(),
+			&BOB_ADDR,
+			U256::zero(),
+			vec![],
+			true,
+			false,
+		);
+
+		ReachedBottom::mutate(|reached_bottom| {
+			if !*reached_bottom {
+				// We are first time here, it means we just reached bottom.
+				// Verify that we've got proper error and set `reached_bottom`.
+				assert_eq!(r, Err(Error::<Test>::MaxCallDepthReached.into()));
+				*reached_bottom = true;
+			} else {
+				// We are just unwinding the stack here.
+				assert_matches!(r, Ok(_));
+			}
+		});
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		set_balance(&BOB, 1);
+		place_contract(&BOB, recurse_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			value.into(),
+			vec![],
+			false,
+		);
+
+		// The outer call still succeeds: the depth error only surfaced at
+		// the bottom of the recursion and was handled inside the contract.
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn caller_returns_proper_values() {
+	// `caller()` must return the immediate caller of each frame:
+	// ALICE for BOB (called from the origin) and BOB for CHARLIE
+	// (called from inside BOB).
+	parameter_types! {
+		static WitnessedCallerBob: Option<H160> = None;
+		static WitnessedCallerCharlie: Option<H160> = None;
+	}
+
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		// Record the caller for bob.
+		WitnessedCallerBob::mutate(|caller| {
+			let origin = ctx.ext.caller();
+			*caller = Some(<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(
+				&origin.account_id().unwrap(),
+			));
+		});
+
+		// Call into CHARLIE contract.
+		assert_matches!(
+			ctx.ext.call(
+				Weight::zero(),
+				U256::zero(),
+				&CHARLIE_ADDR,
+				U256::zero(),
+				vec![],
+				true,
+				false
+			),
+			Ok(_)
+		);
+		exec_success()
+	});
+	let charlie_ch = MockLoader::insert(Call, |ctx, _| {
+		// Record the caller for charlie.
+		WitnessedCallerCharlie::mutate(|caller| {
+			let origin = ctx.ext.caller();
+			*caller = Some(<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(
+				&origin.account_id().unwrap(),
+			));
+		});
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+		place_contract(&CHARLIE, charlie_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+
+		assert_matches!(result, Ok(_));
+	});
+
+	// Each frame saw its direct caller, not the stack origin.
+	assert_eq!(WitnessedCallerBob::get(), Some(ALICE_ADDR));
+	assert_eq!(WitnessedCallerCharlie::get(), Some(BOB_ADDR));
+}
+
+#[test]
+fn origin_returns_proper_values() {
+	// Unlike `caller()`, `origin()` must return the root of the call
+	// stack (ALICE) in every frame, no matter how deep the call is.
+	parameter_types! {
+		static WitnessedCallerBob: Option<H160> = None;
+		static WitnessedCallerCharlie: Option<H160> = None;
+	}
+
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		// Record the origin for bob.
+		WitnessedCallerBob::mutate(|witness| {
+			let origin = ctx.ext.origin();
+			*witness =
+				Some(<Test as Config>::AddressMapper::to_address(&origin.account_id().unwrap()));
+		});
+
+		// Call into CHARLIE contract.
+		assert_matches!(
+			ctx.ext.call(
+				Weight::zero(),
+				U256::zero(),
+				&CHARLIE_ADDR,
+				U256::zero(),
+				vec![],
+				true,
+				false
+			),
+			Ok(_)
+		);
+		exec_success()
+	});
+	let charlie_ch = MockLoader::insert(Call, |ctx, _| {
+		// Record the origin for charlie.
+		WitnessedCallerCharlie::mutate(|witness| {
+			let origin = ctx.ext.origin();
+			*witness =
+				Some(<Test as Config>::AddressMapper::to_address(&origin.account_id().unwrap()));
+		});
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+		place_contract(&CHARLIE, charlie_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+
+		assert_matches!(result, Ok(_));
+	});
+
+	// Both frames witnessed the same stack origin: ALICE.
+	assert_eq!(WitnessedCallerBob::get(), Some(ALICE_ADDR));
+	assert_eq!(WitnessedCallerCharlie::get(), Some(ALICE_ADDR));
+}
+
+#[test]
+fn is_contract_returns_proper_values() {
+	// `is_contract` distinguishes contract accounts from plain accounts.
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		// Verify that BOB is a contract
+		assert!(ctx.ext.is_contract(&BOB_ADDR));
+		// Verify that ALICE is not a contract
+		assert!(!ctx.ext.is_contract(&ALICE_ADDR));
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn to_account_id_returns_proper_values() {
+	// `to_account_id` must agree with the configured `AddressMapper` for a
+	// mapped address, and fall back to a deterministic account id for an
+	// unmapped one: the 20 address bytes followed by 0xEE padding.
+	let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
+		let alice_account_id = <Test as Config>::AddressMapper::to_account_id(&ALICE_ADDR);
+		assert_eq!(ctx.ext.to_account_id(&ALICE_ADDR), alice_account_id);
+
+		const UNMAPPED_ADDR: H160 = H160([99u8; 20]);
+		let mut unmapped_fallback_account_id = [0xEE; 32];
+		unmapped_fallback_account_id[..20].copy_from_slice(UNMAPPED_ADDR.as_bytes());
+		assert_eq!(
+			ctx.ext.to_account_id(&UNMAPPED_ADDR),
+			AccountId32::new(unmapped_fallback_account_id)
+		);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn code_hash_returns_proper_values() {
+	// `code_hash` behavior for the three kinds of addresses:
+	// existing EOA, contract, and non-existent account.
+	let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
+		// ALICE is not a contract but account exists so it returns hash of empty data
+		assert_eq!(ctx.ext.code_hash(&ALICE_ADDR), EMPTY_CODE_HASH);
+		// BOB is a contract (this function) and hence it has a code_hash.
+		// `MockLoader` uses contract index to generate the code hash.
+		assert_eq!(ctx.ext.code_hash(&BOB_ADDR), H256(keccak_256(&0u64.to_le_bytes())));
+		// [0xff;20] doesn't exist and returns hash zero
+		assert!(ctx.ext.code_hash(&H160([0xff; 20])).is_zero());
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		// add alice account info to test case EOA code hash
+		frame_system::Account::<Test>::insert(
+			<Test as Config>::AddressMapper::to_account_id(&ALICE_ADDR),
+			AccountInfo { consumers: 1, providers: 1, ..Default::default() },
+		);
+		place_contract(&BOB, bob_code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// ALICE (not contract) -> BOB (contract)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn own_code_hash_returns_proper_values() {
+	// `own_code_hash` of the executing contract must equal the value that
+	// `code_hash` reports for the contract's own address.
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		let code_hash = ctx.ext.code_hash(&BOB_ADDR);
+		assert_eq!(*ctx.ext.own_code_hash(), code_hash);
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// ALICE (not contract) -> BOB (contract)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn caller_is_origin_returns_proper_values() {
+	// `caller_is_origin` is true only in the frame called directly by the
+	// stack origin, and false in any nested frame.
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		// BOB is not the origin of the stack call
+		assert!(!ctx.ext.caller_is_origin());
+		exec_success()
+	});
+
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// ALICE is the origin of the call stack
+		assert!(ctx.ext.caller_is_origin());
+		// BOB calls CHARLIE
+		ctx.ext
+			.call(Weight::zero(), U256::zero(), &CHARLIE_ADDR, U256::zero(), vec![], true, false)
+			.map(|_| ctx.ext.last_frame_output().clone())
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// ALICE -> BOB (caller is origin) -> CHARLIE (caller is not origin)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn root_caller_succeeds() {
+	// A call with `Origin::Root` and zero value succeeds, and the callee
+	// observes `caller_is_root() == true`.
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// root is the origin of the call stack.
+		assert!(ctx.ext.caller_is_root());
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		let origin = Origin::Root;
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// root -> BOB (caller is root)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn root_caller_does_not_succeed_when_value_not_zero() {
+	// A root-origin call that attaches a non-zero value must fail:
+	// root has no account to draw the transfer from.
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// root is the origin of the call stack.
+		assert!(ctx.ext.caller_is_root());
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		let origin = Origin::Root;
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// root -> BOB (caller is root)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			1u64.into(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Err(_));
+	});
+}
+
+#[test]
+fn root_caller_succeeds_with_consecutive_calls() {
+	// `caller_is_root` must only hold in the first frame of a root-origin
+	// stack; a contract called by another contract is not root-called.
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		// BOB is not root, even though the origin is root.
+		assert!(!ctx.ext.caller_is_root());
+		exec_success()
+	});
+
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// root is the origin of the call stack.
+		assert!(ctx.ext.caller_is_root());
+		// BOB calls CHARLIE.
+		ctx.ext
+			.call(Weight::zero(), U256::zero(), &CHARLIE_ADDR, U256::zero(), vec![], true, false)
+			.map(|_| ctx.ext.last_frame_output().clone())
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::Root;
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		// root -> BOB (caller is root) -> CHARLIE (caller is not root)
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn address_returns_proper_values() {
+	// Each executing frame must see its own contract address via
+	// `ext.address()`.
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		// Verify that address matches BOB.
+		assert_eq!(ctx.ext.address(), BOB_ADDR);
+
+		// Call into charlie contract.
+		assert_matches!(
+			ctx.ext.call(
+				Weight::zero(),
+				U256::zero(),
+				&CHARLIE_ADDR,
+				U256::zero(),
+				vec![],
+				true,
+				false
+			),
+			Ok(_)
+		);
+		exec_success()
+	});
+	let charlie_ch = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(ctx.ext.address(), CHARLIE_ADDR);
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+		place_contract(&CHARLIE, charlie_ch);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn refuse_instantiate_with_value_below_existential_deposit() {
+	// With an existential deposit of 15, instantiating with zero value
+	// must be rejected.
+	let dummy_ch = MockLoader::insert(Constructor, |_, _| exec_success());
+
+	ExtBuilder::default().existential_deposit(15).build().execute_with(|| {
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		assert_matches!(
+			MockStack::run_instantiate(
+				ALICE,
+				executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				U256::zero(), // <- zero value
+				vec![],
+				Some(&[0; 32]),
+				false,
+			),
+			Err(_)
+		);
+	});
+}
+
+#[test]
+fn instantiation_work_with_success_output() {
+	// A successful constructor returns its output data ("PASS") and the
+	// new account ends up with the expected code hash.
+	let dummy_ch = MockLoader::insert(Constructor, |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] })
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
+			set_balance(&ALICE, min_balance * 1000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap();
+
+			// `vec![80, 65, 83, 83]` is ASCII "PASS".
+			let instantiated_contract_address = assert_matches!(
+				MockStack::run_instantiate(
+					ALICE,
+					executable,
+					&mut gas_meter,
+					&mut storage_meter,
+					min_balance.into(),
+					vec![],
+					Some(&[0 ;32]),
+					false,
+				),
+				Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address
+			);
+			let instantiated_contract_id =
+				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_fallback_account_id(
+					&instantiated_contract_address,
+				);
+
+			// Check that the newly created account has the expected code hash and
+			// there are instantiation event.
+			assert_eq!(
+				ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
+				dummy_ch
+			);
+		});
+}
+
+#[test]
+fn instantiation_fails_with_failing_output() {
+	// A reverting constructor still returns its output data ("FAIL"),
+	// but no contract account is created and no events are emitted.
+	let dummy_ch = MockLoader::insert(Constructor, |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] })
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap();
+			set_balance(&ALICE, min_balance * 1000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap();
+
+			// `vec![70, 65, 73, 76]` is ASCII "FAIL".
+			let instantiated_contract_address = assert_matches!(
+				MockStack::run_instantiate(
+					ALICE,
+					executable,
+					&mut gas_meter,
+					&mut storage_meter,
+					min_balance.into(),
+					vec![],
+					Some(&[0; 32]),
+					false,
+				),
+				Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address
+			);
+
+			let instantiated_contract_id =
+				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_fallback_account_id(
+					&instantiated_contract_address,
+				);
+
+			// Check that the account has not been created.
+			assert!(ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).is_none());
+			assert!(events().is_empty());
+		});
+}
+
+#[test]
+fn instantiation_from_contract() {
+	// A contract (BOB) can instantiate another contract; the new account
+	// must carry the expected code hash.
+	let dummy_ch = MockLoader::insert(Call, |_, _| exec_success());
+	let instantiated_contract_address = Rc::new(RefCell::new(None::<H160>));
+	let instantiator_ch = MockLoader::insert(Call, {
+		let instantiated_contract_address = Rc::clone(&instantiated_contract_address);
+		move |ctx, _| {
+			// Instantiate a contract and save its address in `instantiated_contract_address`.
+			let (address, output) = ctx
+				.ext
+				.instantiate(
+					Weight::MAX,
+					U256::MAX,
+					dummy_ch,
+					<Test as Config>::Currency::minimum_balance().into(),
+					vec![],
+					Some(&[48; 32]),
+				)
+				.map(|address| (address, ctx.ext.last_frame_output().clone()))
+				.unwrap();
+
+			*instantiated_contract_address.borrow_mut() = Some(address);
+			Ok(output)
+		}
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			set_balance(&ALICE, min_balance * 100);
+			place_contract(&BOB, instantiator_ch);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, min_balance * 10, min_balance * 10).unwrap();
+
+			assert_matches!(
+				MockStack::run_call(
+					origin,
+					BOB_ADDR,
+					&mut GasMeter::<Test>::new(GAS_LIMIT),
+					&mut storage_meter,
+					(min_balance * 10).into(),
+					vec![],
+					false,
+				),
+				Ok(_)
+			);
+
+			let instantiated_contract_address =
+				*instantiated_contract_address.borrow().as_ref().unwrap();
+
+			let instantiated_contract_id =
+				<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_fallback_account_id(
+					&instantiated_contract_address,
+				);
+
+			// Check that the newly created account has the expected code hash and
+			// there are instantiation event.
+			assert_eq!(
+				ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
+				dummy_ch
+			);
+		});
+}
+
+#[test]
+fn instantiation_traps() {
+	// A trapping constructor surfaces to the instantiating contract as an
+	// `ExecError` attributed to the callee; the outer call still succeeds
+	// because the instantiator handles the error itself.
+	let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into()));
+	let instantiator_ch = MockLoader::insert(Call, {
+		move |ctx, _| {
+			// Attempt the instantiation and assert the trap error propagates
+			// with `ErrorOrigin::Callee`.
+			assert_matches!(
+				ctx.ext.instantiate(
+					Weight::zero(),
+					U256::zero(),
+					dummy_ch,
+					<Test as Config>::Currency::minimum_balance().into(),
+					vec![],
+					Some(&[0; 32]),
+				),
+				Err(ExecError {
+					error: DispatchError::Other("It's a trap!"),
+					origin: ErrorOrigin::Callee,
+				})
+			);
+
+			exec_success()
+		}
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			set_balance(&ALICE, 1000);
+			set_balance(&BOB_FALLBACK, 100);
+			place_contract(&BOB, instantiator_ch);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+
+			assert_matches!(
+				MockStack::run_call(
+					origin,
+					BOB_ADDR,
+					&mut GasMeter::<Test>::new(GAS_LIMIT),
+					&mut storage_meter,
+					U256::zero(),
+					vec![],
+					false,
+				),
+				Ok(_)
+			);
+		});
+}
+
+#[test]
+fn termination_from_instantiate_fails() {
+	// Calling `terminate` inside a constructor is forbidden: the whole
+	// instantiation fails with `TerminatedInConstructor` and no events
+	// are emitted.
+	let terminate_ch = MockLoader::insert(Constructor, |ctx, _| {
+		ctx.ext.terminate(&ALICE_ADDR)?;
+		exec_success()
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let executable = MockExecutable::from_storage(terminate_ch, &mut gas_meter).unwrap();
+			set_balance(&ALICE, 10_000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 100).unwrap();
+
+			assert_eq!(
+				MockStack::run_instantiate(
+					ALICE,
+					executable,
+					&mut gas_meter,
+					&mut storage_meter,
+					100u64.into(),
+					vec![],
+					Some(&[0; 32]),
+					false,
+				),
+				Err(ExecError {
+					error: Error::<Test>::TerminatedInConstructor.into(),
+					origin: ErrorOrigin::Callee
+				})
+			);
+
+			assert_eq!(&events(), &[]);
+		});
+}
+
+#[test]
+fn in_memory_changes_not_discarded() {
+	// Call stack: BOB -> CHARLIE (trap) -> BOB' (success)
+	// This tests verifies some edge case of the contract info cache:
+	// We change some value in our contract info before calling into a contract
+	// that calls into ourself. This triggers a case where BOBs contract info
+	// is written to storage and invalidated by the successful execution of BOB'.
+	// The trap of CHARLIE reverts the storage changes to BOB. When the root BOB regains
+	// control it reloads its contract info from storage. We check that changes that
+	// are made before calling into CHARLIE are not discarded.
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// Input byte 0 selects the root-BOB branch; any other input (BOB'
+		// is called with `vec![99]`) takes the plain-success path.
+		if ctx.input_data[0] == 0 {
+			let info = ctx.ext.contract_info();
+			assert_eq!(info.storage_byte_deposit, 0);
+			info.storage_byte_deposit = 42;
+			assert_eq!(
+				ctx.ext
+					.call(
+						Weight::zero(),
+						U256::zero(),
+						&CHARLIE_ADDR,
+						U256::zero(),
+						vec![],
+						true,
+						false
+					)
+					.map(|_| ctx.ext.last_frame_output().clone()),
+				exec_trapped()
+			);
+			// The in-memory change made before the trapping call survived.
+			assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42);
+		}
+		exec_success()
+	});
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		assert!(ctx
+			.ext
+			.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
+			.is_ok());
+		exec_trapped()
+	});
+
+	// Kick off the chain: BOB with input `[0]` takes the mutating branch.
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn recursive_call_during_constructor_is_balance_transfer() {
+	// A contract calling its own (not yet deployed) address from within
+	// its constructor degrades to a plain balance transfer.
+	let code = MockLoader::insert(Constructor, |ctx, _| {
+		let account_id = ctx.ext.account_id().clone();
+		let addr =
+			<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(&account_id);
+		let balance = ctx.ext.balance();
+
+		// Calling ourselves during the constructor will trigger a balance
+		// transfer since no contract exists yet.
+		assert_ok!(ctx.ext.call(
+			Weight::zero(),
+			U256::zero(),
+			&addr,
+			(balance - 1).into(),
+			vec![],
+			true,
+			false
+		));
+
+		// Should also work with call data set as it is ignored when no
+		// contract is deployed.
+		assert_ok!(ctx.ext.call(
+			Weight::zero(),
+			U256::zero(),
+			&addr,
+			1u32.into(),
+			vec![1, 2, 3, 4],
+			true,
+			false
+		));
+		exec_success()
+	});
+
+	// Run the constructor via instantiate; the assertions above execute
+	// inside it.
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let executable = MockExecutable::from_storage(code, &mut gas_meter).unwrap();
+			set_balance(&ALICE, min_balance * 10_000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance).unwrap();
+
+			let result = MockStack::run_instantiate(
+				ALICE,
+				executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				10u64.into(),
+				vec![],
+				Some(&[0; 32]),
+				false,
+			);
+			assert_matches!(result, Ok(_));
+		});
+}
+
+#[test]
+fn cannot_send_more_balance_than_available_to_self() {
+	// A contract transferring more than its own balance to itself must
+	// fail with `TransferFailed` instead of minting funds.
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		let account_id = ctx.ext.account_id().clone();
+		let addr =
+			<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_address(&account_id);
+		let balance = ctx.ext.balance();
+
+		// `balance + 1` exceeds what the contract owns.
+		assert_err!(
+			ctx.ext.call(
+				Weight::zero(),
+				U256::zero(),
+				&addr,
+				(balance + 1).into(),
+				vec![],
+				true,
+				false
+			),
+			<Error<Test>>::TransferFailed,
+		);
+		exec_success()
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			set_balance(&ALICE, min_balance * 10);
+			place_contract(&BOB, code_hash);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut gas_meter,
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap();
+		});
+}
+
+#[test]
+fn call_reentry_direct_recursion() {
+	// With reentry disabled (`allows_reentry = false`), calling a
+	// different contract is fine but calling back into oneself is denied.
+	// call the contract passed as input with disabled reentry
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		// The 20-byte input is interpreted as the destination address.
+		let dest = H160::from_slice(ctx.input_data.as_ref());
+		ctx.ext
+			.call(Weight::zero(), U256::zero(), &dest, U256::zero(), vec![], false, false)
+			.map(|_| ctx.ext.last_frame_output().clone())
+	});
+
+	let code_charlie = MockLoader::insert(Call, |_, _| exec_success());
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		// Calling another contract should succeed
+		assert_ok!(MockStack::run_call(
+			origin.clone(),
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			CHARLIE_ADDR.as_bytes().to_vec(),
+			false,
+		));
+
+		// Calling into oneself fails
+		assert_err!(
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				BOB_ADDR.as_bytes().to_vec(),
+				false,
+			)
+			.map_err(|e| e.error),
+			<Error<Test>>::ReentranceDenied,
+		);
+	});
+}
+
+#[test]
+fn call_deny_reentry() {
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		if ctx.input_data[0] == 0 {
+			ctx.ext
+				.call(
+					Weight::zero(),
+					U256::zero(),
+					&CHARLIE_ADDR,
+					U256::zero(),
+					vec![],
+					false,
+					false,
+				)
+				.map(|_| ctx.ext.last_frame_output().clone())
+		} else {
+			exec_success()
+		}
+	});
+
+	// call BOB with input set to '1'
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		ctx.ext
+			.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![1], true, false)
+			.map(|_| ctx.ext.last_frame_output().clone())
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		// BOB -> CHARLIE -> BOB fails as BOB denies reentry.
+		assert_err!(
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![0],
+				false,
+			)
+			.map_err(|e| e.error),
+			<Error<Test>>::ReentranceDenied,
+		);
+	});
+}
+
+#[test]
+fn call_runtime_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		let call = RuntimeCall::System(frame_system::Call::remark_with_event {
+			remark: b"Hello World".to_vec(),
+		});
+		ctx.ext.call_runtime(call).unwrap();
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 10);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		System::reset_events();
+		MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		)
+		.unwrap();
+
+		let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello World");
+		assert_eq!(
+			System::events(),
+			vec![EventRecord {
+				phase: Phase::Initialization,
+				event: MetaEvent::System(frame_system::Event::Remarked {
+					sender: BOB_FALLBACK,
+					hash: remark_hash
+				}),
+				topics: vec![],
+			},]
+		);
+	});
+}
+
+#[test]
+fn call_runtime_filter() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		use frame_system::Call as SysCall;
+		use pallet_balances::Call as BalanceCall;
+		use pallet_utility::Call as UtilCall;
+
+		// remark should still be allowed
+		let allowed_call =
+			RuntimeCall::System(SysCall::remark_with_event { remark: b"Hello".to_vec() });
+
+		// transfers are disallowed by the `TestFilter` (see below)
+		let forbidden_call =
+			RuntimeCall::Balances(BalanceCall::transfer_allow_death { dest: CHARLIE, value: 22 });
+
+		// simple cases: direct call
+		assert_err!(
+			ctx.ext.call_runtime(forbidden_call.clone()),
+			frame_system::Error::<Test>::CallFiltered
+		);
+
+		// as part of a batch: return is OK (but it interrupted the batch)
+		assert_ok!(ctx.ext.call_runtime(RuntimeCall::Utility(UtilCall::batch {
+			calls: vec![allowed_call.clone(), forbidden_call, allowed_call]
+		})),);
+
+		// the transfer wasn't performed
+		assert_eq!(get_balance(&CHARLIE), 0);
+
+		exec_success()
+	});
+
+	TestFilter::set_filter(|call| match call {
+		RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) => false,
+		_ => true,
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 10);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		System::reset_events();
+		MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		)
+		.unwrap();
+
+		let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello");
+		assert_eq!(
+			System::events(),
+			vec![
+				EventRecord {
+					phase: Phase::Initialization,
+					event: MetaEvent::System(frame_system::Event::Remarked {
+						sender: BOB_FALLBACK,
+						hash: remark_hash
+					}),
+					topics: vec![],
+				},
+				EventRecord {
+					phase: Phase::Initialization,
+					event: MetaEvent::Utility(pallet_utility::Event::ItemCompleted),
+					topics: vec![],
+				},
+				EventRecord {
+					phase: Phase::Initialization,
+					event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted {
+						index: 1,
+						error: frame_system::Error::<Test>::CallFiltered.into()
+					},),
+					topics: vec![],
+				},
+			]
+		);
+	});
+}
+
+#[test]
+fn nonce() {
+	let fail_code = MockLoader::insert(Constructor, |_, _| exec_trapped());
+	let success_code = MockLoader::insert(Constructor, |_, _| exec_success());
+	let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| {
+		ctx.ext
+			.instantiate(
+				Weight::MAX,
+				U256::MAX,
+				fail_code,
+				ctx.ext.minimum_balance() * 100,
+				vec![],
+				Some(&[0; 32]),
+			)
+			.ok();
+		exec_success()
+	});
+	let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| {
+		let alice_nonce = System::account_nonce(&ALICE);
+		assert_eq!(System::account_nonce(ctx.ext.account_id()), 0);
+		assert_eq!(ctx.ext.caller().account_id().unwrap(), &ALICE);
+		let addr = ctx
+			.ext
+			.instantiate(
+				Weight::MAX,
+				U256::MAX,
+				success_code,
+				ctx.ext.minimum_balance() * 100,
+				vec![],
+				Some(&[0; 32]),
+			)
+			.unwrap();
+
+		let account_id =
+			<<Test as Config>::AddressMapper as AddressMapper<Test>>::to_fallback_account_id(&addr);
+
+		assert_eq!(System::account_nonce(&ALICE), alice_nonce);
+		assert_eq!(System::account_nonce(ctx.ext.account_id()), 1);
+		assert_eq!(System::account_nonce(&account_id), 0);
+
+		// a plain call should not influence the account counter
+		ctx.ext
+			.call(Weight::zero(), U256::zero(), &addr, U256::zero(), vec![], false, false)
+			.unwrap();
+
+		assert_eq!(System::account_nonce(ALICE), alice_nonce);
+		assert_eq!(System::account_nonce(ctx.ext.account_id()), 1);
+		assert_eq!(System::account_nonce(&account_id), 0);
+
+		exec_success()
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.build()
+		.execute_with(|| {
+			let min_balance = <Test as Config>::Currency::minimum_balance();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+			let fail_executable = MockExecutable::from_storage(fail_code, &mut gas_meter).unwrap();
+			let success_executable =
+				MockExecutable::from_storage(success_code, &mut gas_meter).unwrap();
+			let succ_fail_executable =
+				MockExecutable::from_storage(succ_fail_code, &mut gas_meter).unwrap();
+			let succ_succ_executable =
+				MockExecutable::from_storage(succ_succ_code, &mut gas_meter).unwrap();
+			set_balance(&ALICE, min_balance * 10_000);
+			set_balance(&BOB, min_balance * 10_000);
+			let origin = Origin::from_account_id(BOB);
+			let mut storage_meter =
+				storage::meter::Meter::new(&origin, deposit_limit::<Test>(), min_balance * 100)
+					.unwrap();
+
+			// fail should not increment
+			MockStack::run_instantiate(
+				ALICE,
+				fail_executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				(min_balance * 100).into(),
+				vec![],
+				Some(&[0; 32]),
+				false,
+			)
+			.ok();
+			assert_eq!(System::account_nonce(&ALICE), 0);
+
+			assert_ok!(MockStack::run_instantiate(
+				ALICE,
+				success_executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				(min_balance * 100).into(),
+				vec![],
+				Some(&[0; 32]),
+				false,
+			));
+			assert_eq!(System::account_nonce(&ALICE), 1);
+
+			assert_ok!(MockStack::run_instantiate(
+				ALICE,
+				succ_fail_executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				(min_balance * 200).into(),
+				vec![],
+				Some(&[0; 32]),
+				false,
+			));
+			assert_eq!(System::account_nonce(&ALICE), 2);
+
+			assert_ok!(MockStack::run_instantiate(
+				ALICE,
+				succ_succ_executable,
+				&mut gas_meter,
+				&mut storage_meter,
+				(min_balance * 200).into(),
+				vec![],
+				Some(&[0; 32]),
+				false,
+			));
+			assert_eq!(System::account_nonce(&ALICE), 3);
+		});
+}
+
+#[test]
+fn set_storage_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		// Write
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New));
+		assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New));
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true),
+			Ok(WriteOutcome::New)
+		);
+
+		// Overwrite
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![42]), false),
+			Ok(WriteOutcome::Overwritten(3))
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![48]), true),
+			Ok(WriteOutcome::Taken(vec![4, 5, 6]))
+		);
+		assert_eq!(ctx.ext.set_storage(&Key::Fix([3; 32]), None, false), Ok(WriteOutcome::New));
+		assert_eq!(ctx.ext.set_storage(&Key::Fix([4; 32]), None, true), Ok(WriteOutcome::New));
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([5; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::Overwritten(0))
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([6; 32]), Some(vec![]), true),
+			Ok(WriteOutcome::Taken(vec![]))
+		);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn set_storage_varsized_key_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		// Write
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([1; 64].to_vec()).unwrap(),
+				Some(vec![1, 2, 3]),
+				false
+			),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([2; 19].to_vec()).unwrap(),
+				Some(vec![4, 5, 6]),
+				true
+			),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::try_from_var([3; 19].to_vec()).unwrap(), None, false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::try_from_var([4; 64].to_vec()).unwrap(), None, true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([5; 30].to_vec()).unwrap(), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([6; 128].to_vec()).unwrap(), Some(vec![]), true),
+			Ok(WriteOutcome::New)
+		);
+
+		// Overwrite
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([1; 64].to_vec()).unwrap(),
+				Some(vec![42, 43, 44]),
+				false
+			),
+			Ok(WriteOutcome::Overwritten(3))
+		);
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([2; 19].to_vec()).unwrap(),
+				Some(vec![48]),
+				true
+			),
+			Ok(WriteOutcome::Taken(vec![4, 5, 6]))
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::try_from_var([3; 19].to_vec()).unwrap(), None, false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::try_from_var([4; 64].to_vec()).unwrap(), None, true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([5; 30].to_vec()).unwrap(), Some(vec![]), false),
+			Ok(WriteOutcome::Overwritten(0))
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([6; 128].to_vec()).unwrap(), Some(vec![]), true),
+			Ok(WriteOutcome::Taken(vec![]))
+		);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn get_storage_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(ctx.ext.get_storage(&Key::Fix([1; 32])), Some(vec![1, 2, 3]));
+		assert_eq!(ctx.ext.get_storage(&Key::Fix([2; 32])), Some(vec![]));
+		assert_eq!(ctx.ext.get_storage(&Key::Fix([3; 32])), None);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn get_storage_size_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_storage(&Key::Fix([2; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(ctx.ext.get_storage_size(&Key::Fix([1; 32])), Some(3));
+		assert_eq!(ctx.ext.get_storage_size(&Key::Fix([2; 32])), Some(0));
+		assert_eq!(ctx.ext.get_storage_size(&Key::Fix([3; 32])), None);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn get_storage_varsized_key_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([1; 19].to_vec()).unwrap(),
+				Some(vec![1, 2, 3]),
+				false
+			),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([2; 16].to_vec()).unwrap(), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.get_storage(&Key::try_from_var([1; 19].to_vec()).unwrap()),
+			Some(vec![1, 2, 3])
+		);
+		assert_eq!(
+			ctx.ext.get_storage(&Key::try_from_var([2; 16].to_vec()).unwrap()),
+			Some(vec![])
+		);
+		assert_eq!(ctx.ext.get_storage(&Key::try_from_var([3; 8].to_vec()).unwrap()), None);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn get_storage_size_varsized_key_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(
+			ctx.ext.set_storage(
+				&Key::try_from_var([1; 19].to_vec()).unwrap(),
+				Some(vec![1, 2, 3]),
+				false
+			),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext
+				.set_storage(&Key::try_from_var([2; 16].to_vec()).unwrap(), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.get_storage_size(&Key::try_from_var([1; 19].to_vec()).unwrap()),
+			Some(3)
+		);
+		assert_eq!(
+			ctx.ext.get_storage_size(&Key::try_from_var([2; 16].to_vec()).unwrap()),
+			Some(0)
+		);
+		assert_eq!(ctx.ext.get_storage_size(&Key::try_from_var([3; 8].to_vec()).unwrap()), None);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		let min_balance = <Test as Config>::Currency::minimum_balance();
+
+		let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+		set_balance(&ALICE, min_balance * 1000);
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut gas_meter,
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn set_transient_storage_works() {
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		// Write
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true),
+			Ok(WriteOutcome::New)
+		);
+
+		// Overwrite
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![42]), false),
+			Ok(WriteOutcome::Overwritten(3))
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![48]), true),
+			Ok(WriteOutcome::Taken(vec![4, 5, 6]))
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false),
+			Ok(WriteOutcome::Overwritten(0))
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true),
+			Ok(WriteOutcome::Taken(vec![]))
+		);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter =
+			storage::meter::Meter::new(&origin, deposit_limit::<Test>(), 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn get_transient_storage_works() {
+	// Call stack: BOB -> CHARLIE(success) -> BOB' (success)
+	let storage_key_1 = &Key::Fix([1; 32]);
+	let storage_key_2 = &Key::Fix([2; 32]);
+	let storage_key_3 = &Key::Fix([3; 32]);
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		if ctx.input_data[0] == 0 {
+			assert_eq!(
+				ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2]), false),
+				Ok(WriteOutcome::New)
+			);
+			assert_eq!(
+				ctx.ext
+					.call(
+						Weight::zero(),
+						U256::zero(),
+						&CHARLIE_ADDR,
+						U256::zero(),
+						vec![],
+						true,
+						false,
+					)
+					.map(|_| ctx.ext.last_frame_output().clone()),
+				exec_success()
+			);
+			assert_eq!(ctx.ext.get_transient_storage(storage_key_1), Some(vec![3]));
+			assert_eq!(ctx.ext.get_transient_storage(storage_key_2), Some(vec![]));
+			assert_eq!(ctx.ext.get_transient_storage(storage_key_3), None);
+		} else {
+			assert_eq!(
+				ctx.ext.set_transient_storage(storage_key_1, Some(vec![3]), true),
+				Ok(WriteOutcome::Taken(vec![1, 2]))
+			);
+			assert_eq!(
+				ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false),
+				Ok(WriteOutcome::New)
+			);
+		}
+		exec_success()
+	});
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		assert!(ctx
+			.ext
+			.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
+			.is_ok());
+		// CHARLIE cannot read BOB's storage.
+		assert_eq!(ctx.ext.get_transient_storage(storage_key_1), None);
+		exec_success()
+	});
+
+	// This one tests passing the input data into a contract via call.
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn get_transient_storage_size_works() {
+	let storage_key_1 = &Key::Fix([1; 32]);
+	let storage_key_2 = &Key::Fix([2; 32]);
+	let storage_key_3 = &Key::Fix([3; 32]);
+	let code_hash = MockLoader::insert(Call, |ctx, _| {
+		assert_eq!(
+			ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2, 3]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(
+			ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false),
+			Ok(WriteOutcome::New)
+		);
+		assert_eq!(ctx.ext.get_transient_storage_size(storage_key_1), Some(3));
+		assert_eq!(ctx.ext.get_transient_storage_size(storage_key_2), Some(0));
+		assert_eq!(ctx.ext.get_transient_storage_size(storage_key_3), None);
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_hash);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		assert_ok!(MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		));
+	});
+}
+
+#[test]
+fn rollback_transient_storage_works() {
+	// Call stack: BOB -> CHARLIE (trap) -> BOB' (success)
+	let storage_key = &Key::Fix([1; 32]);
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		if ctx.input_data[0] == 0 {
+			assert_eq!(
+				ctx.ext.set_transient_storage(storage_key, Some(vec![1, 2]), false),
+				Ok(WriteOutcome::New)
+			);
+			assert_eq!(
+				ctx.ext
+					.call(
+						Weight::zero(),
+						U256::zero(),
+						&CHARLIE_ADDR,
+						U256::zero(),
+						vec![],
+						true,
+						false
+					)
+					.map(|_| ctx.ext.last_frame_output().clone()),
+				exec_trapped()
+			);
+			assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![1, 2]));
+		} else {
+			let overwritten_length = ctx.ext.get_transient_storage_size(storage_key).unwrap();
+			assert_eq!(
+				ctx.ext.set_transient_storage(storage_key, Some(vec![3]), false),
+				Ok(WriteOutcome::Overwritten(overwritten_length))
+			);
+			assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![3]));
+		}
+		exec_success()
+	});
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		assert!(ctx
+			.ext
+			.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
+			.is_ok());
+		exec_trapped()
+	});
+
+	// This one tests passing the input data into a contract via call.
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn ecdsa_to_eth_address_returns_proper_value() {
+	let bob_ch = MockLoader::insert(Call, |ctx, _| {
+		let pubkey_compressed = array_bytes::hex2array_unchecked(
+			"028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91",
+		);
+		assert_eq!(
+			ctx.ext.ecdsa_to_eth_address(&pubkey_compressed).unwrap(),
+			array_bytes::hex2array_unchecked::<_, 20>("09231da7b19A016f9e576d23B16277062F4d46A8")
+		);
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, bob_ch);
+
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn last_frame_output_works_on_instantiate() {
+	let ok_ch = MockLoader::insert(Constructor, move |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] })
+	});
+	let revert_ch = MockLoader::insert(Constructor, move |_, _| {
+		Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] })
+	});
+	let trap_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into()));
+	let instantiator_ch = MockLoader::insert(Call, {
+		move |ctx, _| {
+			let value = <Test as Config>::Currency::minimum_balance().into();
+
+			// Successful instantiation should set the output
+			let address =
+				ctx.ext.instantiate(Weight::MAX, U256::MAX, ok_ch, value, vec![], None).unwrap();
+			assert_eq!(
+				ctx.ext.last_frame_output(),
+				&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }
+			);
+
+			// Balance transfers should reset the output
+			ctx.ext
+				.call(Weight::MAX, U256::MAX, &address, U256::from(1), vec![], true, false)
+				.unwrap();
+			assert_eq!(ctx.ext.last_frame_output(), &Default::default());
+
+			// Reverted instantiation should set the output
+			ctx.ext
+				.instantiate(Weight::zero(), U256::zero(), revert_ch, value, vec![], None)
+				.unwrap();
+			assert_eq!(
+				ctx.ext.last_frame_output(),
+				&ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }
+			);
+
+			// Trapped instantiation should clear the output
+			ctx.ext
+				.instantiate(Weight::zero(), U256::zero(), trap_ch, value, vec![], None)
+				.unwrap_err();
+			assert_eq!(
+				ctx.ext.last_frame_output(),
+				&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
+			);
+
+			exec_success()
+		}
+	});
+
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			set_balance(&ALICE, 1000);
+			set_balance(&BOB, 100);
+			place_contract(&BOB, instantiator_ch);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap()
+		});
+}
+
+#[test]
+fn last_frame_output_works_on_nested_call() {
+	// Call stack: BOB -> CHARLIE(revert) -> BOB' (success)
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		if ctx.input_data.is_empty() {
+			// We didn't do anything yet
+			assert_eq!(
+				ctx.ext.last_frame_output(),
+				&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
+			);
+
+			ctx.ext
+				.call(
+					Weight::zero(),
+					U256::zero(),
+					&CHARLIE_ADDR,
+					U256::zero(),
+					vec![],
+					true,
+					false,
+				)
+				.unwrap();
+			assert_eq!(
+				ctx.ext.last_frame_output(),
+				&ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }
+			);
+		}
+
+		Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] })
+	});
+	let code_charlie = MockLoader::insert(Call, |ctx, _| {
+		// We didn't do anything yet
+		assert_eq!(
+			ctx.ext.last_frame_output(),
+			&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] }
+		);
+
+		assert!(ctx
+			.ext
+			.call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false)
+			.is_ok());
+		assert_eq!(
+			ctx.ext.last_frame_output(),
+			&ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }
+		);
+
+		Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] })
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		place_contract(&CHARLIE, code_charlie);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![0],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn last_frame_output_is_always_reset() {
+	let code_bob = MockLoader::insert(Call, |ctx, _| {
+		let invalid_code_hash = H256::from_low_u64_le(u64::MAX);
+		let output_revert = || ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1] };
+
+		// A value of U256::max_value() to fail the call on the first condition.
+		*ctx.ext.last_frame_output_mut() = output_revert();
+		assert_eq!(
+			ctx.ext.call(
+				Weight::zero(),
+				U256::zero(),
+				&H160::zero(),
+				U256::max_value(),
+				vec![],
+				true,
+				false,
+			),
+			Err(Error::<Test>::BalanceConversionFailed.into())
+		);
+		assert_eq!(ctx.ext.last_frame_output(), &Default::default());
+
+		// An unknown code hash to fail the delegate_call on the first condition.
+		*ctx.ext.last_frame_output_mut() = output_revert();
+		assert_eq!(
+			ctx.ext.delegate_call(
+				Weight::zero(),
+				U256::zero(),
+				H160([0xff; 20]),
+				Default::default()
+			),
+			Err(Error::<Test>::CodeNotFound.into())
+		);
+		assert_eq!(ctx.ext.last_frame_output(), &Default::default());
+
+		// An unknown code hash to fail instantiation on the first condition.
+		*ctx.ext.last_frame_output_mut() = output_revert();
+		assert_eq!(
+			ctx.ext.instantiate(
+				Weight::zero(),
+				U256::zero(),
+				invalid_code_hash,
+				U256::zero(),
+				vec![],
+				None,
+			),
+			Err(Error::<Test>::CodeNotFound.into())
+		);
+		assert_eq!(ctx.ext.last_frame_output(), &Default::default());
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		place_contract(&BOB, code_bob);
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+
+		let result = MockStack::run_call(
+			origin,
+			BOB_ADDR,
+			&mut GasMeter::<Test>::new(GAS_LIMIT),
+			&mut storage_meter,
+			U256::zero(),
+			vec![],
+			false,
+		);
+		assert_matches!(result, Ok(_));
+	});
+}
+
+#[test]
+fn immutable_data_access_checks_work() {
+	let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| {
+		// Constructors can not get the immutable data
+		assert_eq!(ctx.ext.get_immutable_data(), Err(Error::<Test>::InvalidImmutableAccess.into()));
+		exec_success()
+	});
+	let instantiator_ch = MockLoader::insert(Call, {
+		move |ctx, _| {
+			let value = <Test as Config>::Currency::minimum_balance().into();
+
+			assert_eq!(
+				ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()),
+				Err(Error::<Test>::InvalidImmutableAccess.into())
+			);
+
+			// Constructors can not access the immutable data
+			ctx.ext
+				.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
+				.unwrap();
+
+			exec_success()
+		}
+	});
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			set_balance(&ALICE, 1000);
+			set_balance(&BOB, 100);
+			place_contract(&BOB, instantiator_ch);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap()
+		});
+}
+
+#[test]
+fn correct_immutable_data_in_delegate_call() {
+	let charlie_ch = MockLoader::insert(Call, |ctx, _| {
+		Ok(ExecReturnValue {
+			flags: ReturnFlags::empty(),
+			data: ctx.ext.get_immutable_data()?.to_vec(),
+		})
+	});
+	let bob_ch = MockLoader::insert(Call, move |ctx, _| {
+		// In a regular call, we should witness the callee immutable data
+		assert_eq!(
+			ctx.ext
+				.call(
+					Weight::zero(),
+					U256::zero(),
+					&CHARLIE_ADDR,
+					U256::zero(),
+					vec![],
+					true,
+					false,
+				)
+				.map(|_| ctx.ext.last_frame_output().data.clone()),
+			Ok(vec![2]),
+		);
+
+		// Also in a delegate call, we should witness the callee immutable data
+		assert_eq!(
+			ctx.ext
+				.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())
+				.map(|_| ctx.ext.last_frame_output().data.clone()),
+			Ok(vec![2])
+		);
+
+		exec_success()
+	});
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			place_contract(&BOB, bob_ch);
+			place_contract(&CHARLIE, charlie_ch);
+
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+
+			// Place unique immutable data for each contract
+			<ImmutableDataOf<Test>>::insert::<_, ImmutableData>(
+				BOB_ADDR,
+				vec![1].try_into().unwrap(),
+			);
+			<ImmutableDataOf<Test>>::insert::<_, ImmutableData>(
+				CHARLIE_ADDR,
+				vec![2].try_into().unwrap(),
+			);
+
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap()
+		});
+}
+
+#[test]
+fn immutable_data_set_overrides() {
+	let hash = MockLoader::insert_both(
+		move |ctx, _| {
+			// Calling `set_immutable_data` the first time should work
+			assert_ok!(ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()));
+			// Calling `set_immutable_data` the second time overrides the original one
+			assert_ok!(ctx.ext.set_immutable_data(vec![7, 5].try_into().unwrap()));
+			exec_success()
+		},
+		move |ctx, _| {
+			assert_eq!(ctx.ext.get_immutable_data().unwrap().into_inner(), vec![7, 5]);
+			exec_success()
+		},
+	);
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			set_balance(&ALICE, 1000);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
+
+			let addr = MockStack::run_instantiate(
+				ALICE,
+				MockExecutable::from_storage(hash, &mut gas_meter).unwrap(),
+				&mut gas_meter,
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				None,
+				false,
+			)
+			.unwrap()
+			.0;
+
+			MockStack::run_call(
+				origin,
+				addr,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap()
+		});
+}
+
+#[test]
+fn immutable_data_set_errors_with_empty_data() {
+	let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| {
+		// Calling `set_immutable_data` with empty data should error out
+		assert_eq!(
+			ctx.ext.set_immutable_data(Default::default()),
+			Err(Error::<Test>::InvalidImmutableAccess.into())
+		);
+		exec_success()
+	});
+	let instantiator_ch = MockLoader::insert(Call, {
+		move |ctx, _| {
+			let value = <Test as Config>::Currency::minimum_balance().into();
+			ctx.ext
+				.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
+				.unwrap();
+
+			exec_success()
+		}
+	});
+	ExtBuilder::default()
+		.with_code_hashes(MockLoader::code_hashes())
+		.existential_deposit(15)
+		.build()
+		.execute_with(|| {
+			set_balance(&ALICE, 1000);
+			set_balance(&BOB, 100);
+			place_contract(&BOB, instantiator_ch);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap();
+
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![],
+				false,
+			)
+			.unwrap()
+		});
+}
+
+#[test]
+fn block_hash_returns_proper_values() {
+	let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
+		ctx.ext.block_number = 1u32.into();
+		assert_eq!(ctx.ext.block_hash(U256::from(1)), None);
+		assert_eq!(ctx.ext.block_hash(U256::from(0)), Some(H256::from([1; 32])));
+
+		ctx.ext.block_number = 300u32.into();
+		assert_eq!(ctx.ext.block_hash(U256::from(300)), None);
+		assert_eq!(ctx.ext.block_hash(U256::from(43)), None);
+		assert_eq!(ctx.ext.block_hash(U256::from(44)), Some(H256::from([2; 32])));
+
+		exec_success()
+	});
+
+	ExtBuilder::default().build().execute_with(|| {
+		frame_system::BlockHash::<Test>::insert(
+			&BlockNumberFor::<Test>::from(0u32),
+			<tests::Test as frame_system::Config>::Hash::from([1; 32]),
+		);
+		frame_system::BlockHash::<Test>::insert(
+			&BlockNumberFor::<Test>::from(1u32),
+			<tests::Test as frame_system::Config>::Hash::default(),
+		);
+		frame_system::BlockHash::<Test>::insert(
+			&BlockNumberFor::<Test>::from(43u32),
+			<tests::Test as frame_system::Config>::Hash::default(),
+		);
+		frame_system::BlockHash::<Test>::insert(
+			&BlockNumberFor::<Test>::from(44u32),
+			<tests::Test as frame_system::Config>::Hash::from([2; 32]),
+		);
+		frame_system::BlockHash::<Test>::insert(
+			&BlockNumberFor::<Test>::from(300u32),
+			<tests::Test as frame_system::Config>::Hash::default(),
+		);
+
+		place_contract(&BOB, bob_code_hash);
+
+		let origin = Origin::from_account_id(ALICE);
+		let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+		assert_matches!(
+			MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![0],
+				false,
+			),
+			Ok(_)
+		);
+	});
+}
diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs
index fd6ffc55e40c34d8e78fa879078c0be2d1cf86b1..8e91c4ecfd1cd1cf90cd28a3dd6e278001b332b4 100644
--- a/substrate/frame/root-offences/src/lib.rs
+++ b/substrate/frame/root-offences/src/lib.rs
@@ -31,7 +31,7 @@ extern crate alloc;
 
 use alloc::vec::Vec;
 use pallet_session::historical::IdentificationTuple;
-use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking};
+use pallet_staking::Pallet as Staking;
 use sp_runtime::Perbill;
 use sp_staking::offence::OnOffenceHandler;
 
@@ -49,11 +49,8 @@ pub mod pallet {
 		+ pallet_staking::Config
 		+ pallet_session::Config<ValidatorId = <Self as frame_system::Config>::AccountId>
 		+ pallet_session::historical::Config<
-			FullIdentification = Exposure<
-				<Self as frame_system::Config>::AccountId,
-				BalanceOf<Self>,
-			>,
-			FullIdentificationOf = ExposureOf<Self>,
+			FullIdentification = (),
+			FullIdentificationOf = pallet_staking::NullIdentity,
 		>
 	{
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
@@ -106,15 +103,11 @@ pub mod pallet {
 		fn get_offence_details(
 			offenders: Vec<(T::AccountId, Perbill)>,
 		) -> Result<Vec<OffenceDetails<T>>, DispatchError> {
-			let now = pallet_staking::ActiveEra::<T>::get()
-				.map(|e| e.index)
-				.ok_or(Error::<T>::FailedToGetActiveEra)?;
-
 			Ok(offenders
 				.clone()
 				.into_iter()
 				.map(|(o, _)| OffenceDetails::<T> {
-					offender: (o.clone(), Staking::<T>::eras_stakers(now, &o)),
+					offender: (o.clone(), ()),
 					reporters: Default::default(),
 				})
 				.collect())
@@ -124,7 +117,7 @@ pub mod pallet {
 		fn submit_offence(offenders: &[OffenceDetails<T>], slash_fraction: &[Perbill]) {
 			let session_index = <pallet_session::Pallet<T> as frame_support::traits::ValidatorSet<T::AccountId>>::session_index();
 
-			<pallet_staking::Pallet<T> as OnOffenceHandler<
+			<Staking<T> as OnOffenceHandler<
 				T::AccountId,
 				IdentificationTuple<T>,
 				Weight,
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index 2303221c8819a1440880ae24daccb893f4c8b50e..ce55bdcbdd3c4b0cca5523fd81953177a7649d3c 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -28,7 +28,7 @@ use frame_support::{
 	traits::{ConstU32, ConstU64, OneSessionHandler},
 	BoundedVec,
 };
-use pallet_staking::StakerStatus;
+use pallet_staking::{BalanceOf, StakerStatus};
 use sp_core::ConstBool;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
 use sp_staking::{EraIndex, SessionIndex};
@@ -148,8 +148,8 @@ impl pallet_staking::Config for Test {
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -167,6 +167,7 @@ impl pallet_session::Config for Test {
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
@@ -297,6 +298,11 @@ pub(crate) fn run_to_block(n: BlockNumber) {
 	);
 }
 
+/// Progress by `n` blocks.
+pub(crate) fn advance_blocks(n: u64) {
+	run_to_block(System::block_number() + n);
+}
+
 pub(crate) fn active_era() -> EraIndex {
 	pallet_staking::ActiveEra::<Test>::get().unwrap().index
 }
diff --git a/substrate/frame/root-offences/src/tests.rs b/substrate/frame/root-offences/src/tests.rs
index 289bb708efbbc0d9496499a2c9e8b25c240715f9..da6c49895bec1da8acff7baaf8bc20e82b4d5847 100644
--- a/substrate/frame/root-offences/src/tests.rs
+++ b/substrate/frame/root-offences/src/tests.rs
@@ -17,7 +17,10 @@
 
 use super::*;
 use frame_support::{assert_err, assert_ok};
-use mock::{active_era, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, Test as T};
+use mock::{
+	active_era, advance_blocks, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System,
+	Test as T,
+};
 use pallet_staking::asset;
 
 #[test]
@@ -42,6 +45,10 @@ fn create_offence_works_given_root_origin() {
 		assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone()));
 
 		System::assert_last_event(Event::OffenceCreated { offenders }.into());
+
+		// offence is processed in the following block.
+		advance_blocks(1);
+
 		// the slash should be applied right away.
 		assert_eq!(asset::staked::<T>(&11), 500);
 
@@ -66,6 +73,9 @@ fn create_offence_wont_slash_non_active_validators() {
 
 		System::assert_last_event(Event::OffenceCreated { offenders }.into());
 
+		// advance to the next block so offence gets processed.
+		advance_blocks(1);
+
 		// so 31 didn't get slashed.
 		assert_eq!(asset::staked::<T>(&31), 500);
 
diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs
index cfa9097b54121badf81bf6efa52afedbe96f6a06..110f46b0322045ffb8d183c1994612a4a6163754 100644
--- a/substrate/frame/safe-mode/src/lib.rs
+++ b/substrate/frame/safe-mode/src/lib.rs
@@ -238,7 +238,18 @@ pub mod pallet {
 	}
 
 	/// The reason why the safe-mode was deactivated.
-	#[derive(Copy, Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)]
+	#[derive(
+		Copy,
+		Clone,
+		PartialEq,
+		Eq,
+		RuntimeDebug,
+		Encode,
+		Decode,
+		DecodeWithMemTracking,
+		TypeInfo,
+		MaxEncodedLen,
+	)]
 	pub enum ExitReason {
 		/// The safe-mode was automatically deactivated after it's duration ran out.
 		Timeout,
diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs
index 2980f86abc2811143b137ce669c856c257d6760c..277fe20ec4122059297008bdc27d3f7fb5be7b34 100644
--- a/substrate/frame/safe-mode/src/mock.rs
+++ b/substrate/frame/safe-mode/src/mock.rs
@@ -91,6 +91,7 @@ impl pallet_utility::Config for Test {
 	PartialOrd,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	RuntimeDebug,
 	MaxEncodedLen,
 	scale_info::TypeInfo,
diff --git a/substrate/frame/salary/src/lib.rs b/substrate/frame/salary/src/lib.rs
index 6a843625f4a7bc61901b890f79135c183188d354..45c711e21c109f9d897c3dfdd137881349824abc 100644
--- a/substrate/frame/salary/src/lib.rs
+++ b/substrate/frame/salary/src/lib.rs
@@ -136,12 +136,11 @@ pub mod pallet {
 
 	/// The overall status of the system.
 	#[pallet::storage]
-	pub(super) type Status<T: Config<I>, I: 'static = ()> =
-		StorageValue<_, StatusOf<T, I>, OptionQuery>;
+	pub type Status<T: Config<I>, I: 'static = ()> = StorageValue<_, StatusOf<T, I>, OptionQuery>;
 
 	/// The status of a claimant.
 	#[pallet::storage]
-	pub(super) type Claimant<T: Config<I>, I: 'static = ()> =
+	pub type Claimant<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, ClaimantStatusOf<T, I>, OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs
index 80ba7fd06da071a3c0fd96178d8d715dd7a2c150..2ad94ec04df47c24f9df6c13207b7263a26c825b 100644
--- a/substrate/frame/scheduler/src/lib.rs
+++ b/substrate/frame/scheduler/src/lib.rs
@@ -146,20 +146,20 @@ struct ScheduledV1<Call, BlockNumber> {
 }
 
 /// Information regarding an item to be executed in the future.
-#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
-#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 pub struct Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId> {
 	/// The unique identity for this task, if there is one.
-	maybe_id: Option<Name>,
+	pub maybe_id: Option<Name>,
 	/// This task's priority.
-	priority: schedule::Priority,
+	pub priority: schedule::Priority,
 	/// The call to be dispatched.
-	call: Call,
+	pub call: Call,
 	/// If the call is periodic, then this points to the information concerning that.
-	maybe_periodic: Option<schedule::Period<BlockNumber>>,
+	pub maybe_periodic: Option<schedule::Period<BlockNumber>>,
 	/// The origin with which to dispatch the call.
-	origin: PalletsOrigin,
-	_phantom: PhantomData<AccountId>,
+	pub origin: PalletsOrigin,
+	#[doc(hidden)]
+	pub _phantom: PhantomData<AccountId>,
 }
 
 impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
@@ -351,7 +351,7 @@ pub mod pallet {
 	/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
 	/// identities.
 	#[pallet::storage]
-	pub(crate) type Lookup<T: Config> =
+	pub type Lookup<T: Config> =
 		StorageMap<_, Twox64Concat, TaskName, TaskAddress<BlockNumberFor<T>>>;
 
 	/// Events type.
diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs
index b0681f5aa000f7914f2147300457e8aee97ccd47..746c3b12e972b8fad920c7b4a552ecc89fe7d211 100644
--- a/substrate/frame/session/benchmarking/src/mock.rs
+++ b/substrate/frame/session/benchmarking/src/mock.rs
@@ -27,11 +27,11 @@ use frame_support::{
 	derive_impl, parameter_types,
 	traits::{ConstU32, ConstU64},
 };
+use pallet_staking::NullIdentity;
 use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId};
 
 type AccountId = u64;
 type Nonce = u32;
-type Balance = u64;
 
 type Block = frame_system::mocking::MockBlock<Test>;
 
@@ -68,8 +68,8 @@ impl pallet_timestamp::Config for Test {
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -104,6 +104,7 @@ impl pallet_session::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 pallet_staking_reward_curve::build! {
diff --git a/substrate/frame/session/src/disabling.rs b/substrate/frame/session/src/disabling.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0780f95ae421936b17ecb1bbdbbb665f3b426f22
--- /dev/null
+++ b/substrate/frame/session/src/disabling.rs
@@ -0,0 +1,199 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use frame_support::defensive;
+/// Controls validator disabling
+pub trait DisablingStrategy<T: Config> {
+	/// Make a disabling decision, returning a [`DisablingDecision`].
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision;
+}
+
+/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing
+/// `decision`
+///
+/// `disable` is the index of the validator to disable,
+/// `reenable` is the index of the validator to re-enable.
+#[derive(Debug)]
+pub struct DisablingDecision {
+	pub disable: Option<u32>,
+	pub reenable: Option<u32>,
+}
+
+impl<T: Config> DisablingStrategy<T> for () {
+	fn decision(
+		_offender_stash: &T::ValidatorId,
+		_offender_slash_severity: OffenceSeverity,
+		_currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		DisablingDecision { disable: None, reenable: None }
+	}
+}
+/// Calculate the disabling limit based on the number of validators and the disabling limit factor.
+///
+/// This is a sensible default implementation for the disabling limit factor for most disabling
+/// strategies.
+///
+/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled
+fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
+	validators_len
+		.saturating_sub(1)
+		.checked_div(disabling_limit_factor)
+		.unwrap_or_else(|| {
+			defensive!("DISABLING_LIMIT_FACTOR should not be 0");
+			0
+		})
+}
+
+/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables
+/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the
+/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than
+/// 1/3 of the validators in the active set can be disabled in an era.
+///
+/// By default a factor of 3 is used which is the byzantine threshold.
+pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
+
+impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
+	/// Disabling limit calculated from the total number of validators in the active set. When
+	/// reached no more validators will be disabled.
+	pub fn disable_limit(validators_len: usize) -> usize {
+		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
+	}
+}
+
+impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
+	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		_offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		let active_set = Validators::<T>::get();
+
+		// We don't disable more than the limit
+		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
+			log!(
+				debug,
+				"Won't disable: reached disabling limit {:?}",
+				Self::disable_limit(active_set.len())
+			);
+			return DisablingDecision { disable: None, reenable: None }
+		}
+
+		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
+			idx as u32
+		} else {
+			log!(debug, "Won't disable: offender not in active set",);
+			return DisablingDecision { disable: None, reenable: None }
+		};
+
+		log!(debug, "Will disable {:?}", offender_idx);
+
+		DisablingDecision { disable: Some(offender_idx), reenable: None }
+	}
+}
+
+/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
+/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher
+/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new
+/// offender.
+///
+/// This strategy is not based on cumulative severity of offences but only on the severity of the
+/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated
+/// the same as an offender committing 50% offence.
+///
+/// An extension of [`UpToLimitDisablingStrategy`].
+pub struct UpToLimitWithReEnablingDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
+
+impl<const DISABLING_LIMIT_FACTOR: usize>
+	UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	/// Disabling limit calculated from the total number of validators in the active set. When
+	/// reached re-enabling logic might kick in.
+	pub fn disable_limit(validators_len: usize) -> usize {
+		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
+	}
+}
+
+impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
+	for UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		let active_set = Validators::<T>::get();
+
+		// We don't disable validators that are not in the active set
+		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
+			idx as u32
+		} else {
+			log!(debug, "Won't disable: offender not in active set",);
+			return DisablingDecision { disable: None, reenable: None }
+		};
+
+		// Check if offender is already disabled
+		if let Some((_, old_severity)) =
+			currently_disabled.iter().find(|(idx, _)| *idx == offender_idx)
+		{
+			if offender_slash_severity > *old_severity {
+				log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx);
+				return DisablingDecision { disable: Some(offender_idx), reenable: None };
+			} else {
+				log!(debug, "Offender already disabled with higher or equal severity");
+				return DisablingDecision { disable: None, reenable: None };
+			}
+		}
+
+		// We don't disable more than the limit (but we can re-enable a smaller offender to make
+		// space)
+		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
+			log!(
+				debug,
+				"Reached disabling limit {:?}, checking for re-enabling",
+				Self::disable_limit(active_set.len())
+			);
+
+			// Find the smallest offender to re-enable that is not higher than
+			// offender_slash_severity
+			if let Some((smallest_idx, _)) = currently_disabled
+				.iter()
+				.filter(|(_, severity)| *severity <= offender_slash_severity)
+				.min_by_key(|(_, severity)| *severity)
+			{
+				log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx);
+				return DisablingDecision {
+					disable: Some(offender_idx),
+					reenable: Some(*smallest_idx),
+				}
+			} else {
+				log!(debug, "No smaller offender found to re-enable");
+				return DisablingDecision { disable: None, reenable: None }
+			}
+		} else {
+			// If we are not at the limit, just disable the new offender and dont re-enable anyone
+			log!(debug, "Will disable {:?}", offender_idx);
+			return DisablingDecision { disable: Some(offender_idx), reenable: None }
+		}
+	}
+}
diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs
index 98ce774e428154e0752f553d4dbfddb438a0b4f2..a80a2b235757bab833397289a5753402a6cd3b93 100644
--- a/substrate/frame/session/src/lib.rs
+++ b/substrate/frame/session/src/lib.rs
@@ -106,6 +106,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+pub mod disabling;
 #[cfg(feature = "historical")]
 pub mod historical;
 pub mod migrations;
@@ -123,6 +124,7 @@ use core::{
 	marker::PhantomData,
 	ops::{Rem, Sub},
 };
+use disabling::DisablingStrategy;
 use frame_support::{
 	dispatch::DispatchResult,
 	ensure,
@@ -136,13 +138,26 @@ use frame_support::{
 use frame_system::pallet_prelude::BlockNumberFor;
 use sp_runtime::{
 	traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero},
-	ConsensusEngineId, DispatchError, KeyTypeId, Permill, RuntimeAppPublic,
+	ConsensusEngineId, DispatchError, KeyTypeId, Perbill, Permill, RuntimeAppPublic,
 };
-use sp_staking::SessionIndex;
+use sp_staking::{offence::OffenceSeverity, SessionIndex};
 
 pub use pallet::*;
 pub use weights::WeightInfo;
 
+pub(crate) const LOG_TARGET: &str = "runtime::session";
+
+// syntactic sugar for logging.
+#[macro_export]
+macro_rules! log {
+	($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
+		log::$level!(
+			target: crate::LOG_TARGET,
+			concat!("[{:?}] 💸 ", $patter), <frame_system::Pallet<T>>::block_number() $(, $values)*
+		)
+	};
+}
+
 /// Decides whether the session should be ended.
 pub trait ShouldEndSession<BlockNumber> {
 	/// Return `true` if the session should be ended.
@@ -375,7 +390,7 @@ pub mod pallet {
 	use frame_system::pallet_prelude::*;
 
 	/// The in-code storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 
 	#[pallet::pallet]
 	#[pallet::storage_version(STORAGE_VERSION)]
@@ -385,7 +400,7 @@ pub mod pallet {
 	#[pallet::config]
 	pub trait Config: frame_system::Config {
 		/// The overarching event type.
-		type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 
 		/// A stable ID for a validator.
 		type ValidatorId: Member
@@ -416,6 +431,9 @@ pub mod pallet {
 		/// The keys.
 		type Keys: OpaqueKeys + Member + Parameter + MaybeSerializeDeserialize;
 
+		/// `DisablingStrategy` controls how validators are disabled.
+		type DisablingStrategy: DisablingStrategy<Self>;
+
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: WeightInfo;
 	}
@@ -518,7 +536,7 @@ pub mod pallet {
 	/// disabled using binary search. It gets cleared when `on_session_ending` returns
 	/// a new set of identities.
 	#[pallet::storage]
-	pub type DisabledValidators<T> = StorageValue<_, Vec<u32>, ValueQuery>;
+	pub type DisabledValidators<T> = StorageValue<_, Vec<(u32, OffenceSeverity)>, ValueQuery>;
 
 	/// The next session keys for a validator.
 	#[pallet::storage]
@@ -532,10 +550,14 @@ pub mod pallet {
 
 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
-	pub enum Event {
+	pub enum Event<T: Config> {
 		/// New session has happened. Note that the argument is the session index, not the
 		/// block number as the type might suggest.
 		NewSession { session_index: SessionIndex },
+		/// Validator has been disabled.
+		ValidatorDisabled { validator: T::ValidatorId },
+		/// Validator has been re-enabled.
+		ValidatorReenabled { validator: T::ValidatorId },
 	}
 
 	/// Error for the session pallet.
@@ -631,7 +653,7 @@ impl<T: Config> Pallet<T> {
 
 	/// Public function to access the disabled validators.
 	pub fn disabled_validators() -> Vec<u32> {
-		DisabledValidators::<T>::get()
+		DisabledValidators::<T>::get().iter().map(|(i, _)| *i).collect()
 	}
 
 	/// Move on to next session. Register new validator set and session keys. Changes to the
@@ -644,7 +666,7 @@ impl<T: Config> Pallet<T> {
 		// Inform the session handlers that a session is going to end.
 		T::SessionHandler::on_before_session_ending();
 		T::SessionManager::end_session(session_index);
-		log::trace!(target: "runtime::session", "ending_session {:?}", session_index);
+		log!(trace, "ending_session {:?}", session_index);
 
 		// Get queued session keys and validators.
 		let session_keys = QueuedKeys::<T>::get();
@@ -724,14 +746,16 @@ impl<T: Config> Pallet<T> {
 	}
 
 	/// Disable the validator of index `i`, returns `false` if the validator was already disabled.
+	///
+	/// Note: This sets the OffenceSeverity to the lowest value.
 	pub fn disable_index(i: u32) -> bool {
-		if i >= Validators::<T>::decode_len().unwrap_or(0) as u32 {
+		if i >= Validators::<T>::decode_len().defensive_unwrap_or(0) as u32 {
 			return false
 		}
 
 		DisabledValidators::<T>::mutate(|disabled| {
-			if let Err(index) = disabled.binary_search(&i) {
-				disabled.insert(index, i);
+			if let Err(index) = disabled.binary_search_by_key(&i, |(index, _)| *index) {
+				disabled.insert(index, (i, OffenceSeverity(Perbill::zero())));
 				T::SessionHandler::on_disabled(i);
 				return true
 			}
@@ -740,23 +764,6 @@ impl<T: Config> Pallet<T> {
 		})
 	}
 
-	/// Re-enable the validator of index `i`, returns `false` if the validator was already enabled.
-	pub fn enable_index(i: u32) -> bool {
-		if i >= Validators::<T>::decode_len().defensive_unwrap_or(0) as u32 {
-			return false
-		}
-
-		// If the validator is not disabled, return false.
-		DisabledValidators::<T>::mutate(|disabled| {
-			if let Ok(index) = disabled.binary_search(&i) {
-				disabled.remove(index);
-				true
-			} else {
-				false
-			}
-		})
-	}
-
 	/// Disable the validator identified by `c`. (If using with the staking pallet,
 	/// this would be their *stash* account.)
 	///
@@ -920,6 +927,47 @@ impl<T: Config> Pallet<T> {
 	fn clear_key_owner(id: KeyTypeId, key_data: &[u8]) {
 		KeyOwner::<T>::remove((id, key_data));
 	}
+
+	pub fn report_offence(validator: T::ValidatorId, severity: OffenceSeverity) {
+		DisabledValidators::<T>::mutate(|disabled| {
+			let decision = T::DisablingStrategy::decision(&validator, severity, &disabled);
+
+			if let Some(offender_idx) = decision.disable {
+				// Check if the offender is already disabled
+				match disabled.binary_search_by_key(&offender_idx, |(index, _)| *index) {
+					// Offender is already disabled, update severity if the new one is higher
+					Ok(index) => {
+						let (_, old_severity) = &mut disabled[index];
+						if severity > *old_severity {
+							*old_severity = severity;
+						}
+					},
+					Err(index) => {
+						// Offender is not disabled, add to `DisabledValidators` and disable it
+						disabled.insert(index, (offender_idx, severity));
+						// let the session handlers know that a validator got disabled
+						T::SessionHandler::on_disabled(offender_idx);
+
+						// Emit event that a validator got disabled
+						Self::deposit_event(Event::ValidatorDisabled {
+							validator: validator.clone(),
+						});
+					},
+				}
+			}
+
+			if let Some(reenable_idx) = decision.reenable {
+				// Remove the validator from `DisabledValidators` and re-enable it.
+				if let Ok(index) = disabled.binary_search_by_key(&reenable_idx, |(index, _)| *index)
+				{
+					disabled.remove(index);
+					// Emit event that a validator got re-enabled
+					let reenabled_stash = Validators::<T>::get()[reenable_idx as usize].clone();
+					Self::deposit_event(Event::ValidatorReenabled { validator: reenabled_stash });
+				}
+			}
+		});
+	}
 }
 
 impl<T: Config> ValidatorRegistration<T::ValidatorId> for Pallet<T> {
@@ -955,11 +1003,11 @@ impl<T: Config> EstimateNextNewSession<BlockNumberFor<T>> for Pallet<T> {
 
 impl<T: Config> frame_support::traits::DisabledValidators for Pallet<T> {
 	fn is_disabled(index: u32) -> bool {
-		DisabledValidators::<T>::get().binary_search(&index).is_ok()
+		DisabledValidators::<T>::get().binary_search_by_key(&index, |(i, _)| *i).is_ok()
 	}
 
 	fn disabled_validators() -> Vec<u32> {
-		DisabledValidators::<T>::get()
+		Self::disabled_validators()
 	}
 }
 
diff --git a/substrate/frame/session/src/migrations/historical.rs b/substrate/frame/session/src/migrations/historical.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b6838099837a00a7f440cf8229fbdc2c9bd5b896
--- /dev/null
+++ b/substrate/frame/session/src/migrations/historical.rs
@@ -0,0 +1,196 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use core::str;
+use sp_io::hashing::twox_128;
+
+use frame_support::{
+	storage::{generator::StorageValue, StoragePrefixedMap},
+	traits::{
+		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
+		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+	},
+	weights::Weight,
+};
+
+use crate::historical as pallet_session_historical;
+
+const LOG_TARGET: &str = "runtime::session_historical";
+
+const OLD_PREFIX: &str = "Session";
+
+/// Migrate the entire storage of this pallet to a new prefix.
+///
+/// This new prefix must be the same as the one set in construct_runtime.
+///
+/// The migration will look into the storage version in order not to trigger a migration on an up
+/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
+/// migration.
+pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
+) -> Weight {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	if new_pallet_name == OLD_PREFIX {
+		log::info!(
+			target: LOG_TARGET,
+			"New pallet name is equal to the old prefix. No migration needs to be done.",
+		);
+		return Weight::zero()
+	}
+
+	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
+	log::info!(
+		target: LOG_TARGET,
+		"Running migration to v1 for session_historical with storage version {:?}",
+		on_chain_storage_version,
+	);
+
+	if on_chain_storage_version < 1 {
+		let storage_prefix = pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		let storage_prefix = pallet_session_historical::StoredRange::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		StorageVersion::new(1).put::<P>();
+		<T as frame_system::Config>::BlockWeights::get().max_block
+	} else {
+		log::warn!(
+			target: LOG_TARGET,
+			"Attempted to apply migration to v1 but failed because storage version is {:?}",
+			on_chain_storage_version,
+		);
+		Weight::zero()
+	}
+}
+
+/// Some checks prior to migration. This can be linked to
+/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn pre_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
+	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+
+	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|key| Ok(key.to_vec()),
+	);
+
+	// Ensure nothing except the storage_version_key is stored in the new prefix.
+	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+
+	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
+}
+
+/// Some checks for after migration. This can be linked to
+/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn post_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration(
+		"post-migration",
+		storage_prefix_historical_sessions,
+		OLD_PREFIX,
+		new_pallet_name,
+	);
+	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	// Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix.
+	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
+	let old_historical_sessions_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_historical_sessions)[..]].concat();
+	let old_historical_sessions_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_historical_sessions_key.to_vec(),
+		old_historical_sessions_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_historical_sessions_key_iter.count(), 0);
+
+	let old_stored_range_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_stored_range)[..]].concat();
+	let old_stored_range_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_stored_range_key.to_vec(),
+		old_stored_range_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_stored_range_key_iter.count(), 0);
+
+	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
+	// moved to the new prefix.
+	// NOTE: storage_version_key is already in the new prefix.
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|_| Ok(()),
+	);
+	assert!(new_pallet_prefix_iter.count() >= 1);
+
+	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
+}
+
+fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
+	log::info!(
+		target: LOG_TARGET,
+		"{} prefix of storage '{}': '{}' ==> '{}'",
+		stage,
+		str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>"),
+		old_pallet_name,
+		new_pallet_name,
+	);
+}
diff --git a/substrate/frame/session/src/migrations/mod.rs b/substrate/frame/session/src/migrations/mod.rs
index 3b15d0ac4646abaef577e136fd35a4d1a8840344..730dd9c69edce5c2a4eaa79939f95c090dca1147 100644
--- a/substrate/frame/session/src/migrations/mod.rs
+++ b/substrate/frame/session/src/migrations/mod.rs
@@ -21,4 +21,5 @@
 /// In version 1 it uses its name as configured in `construct_runtime`.
 /// This migration moves session historical pallet storages from old prefix to new prefix.
 #[cfg(feature = "historical")]
+pub mod historical;
 pub mod v1;
diff --git a/substrate/frame/session/src/migrations/v1.rs b/substrate/frame/session/src/migrations/v1.rs
index b6838099837a00a7f440cf8229fbdc2c9bd5b896..bac0af6fe6b0f604eb1eecb898d68c33f62da282 100644
--- a/substrate/frame/session/src/migrations/v1.rs
+++ b/substrate/frame/session/src/migrations/v1.rs
@@ -15,182 +15,93 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use core::str;
-use sp_io::hashing::twox_128;
-
+use crate::{Config, DisabledValidators as NewDisabledValidators, Pallet, Perbill, Vec};
 use frame_support::{
-	storage::{generator::StorageValue, StoragePrefixedMap},
-	traits::{
-		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
-		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
-	},
-	weights::Weight,
+	pallet_prelude::{Get, ValueQuery, Weight},
+	traits::UncheckedOnRuntimeUpgrade,
 };
+use sp_staking::offence::OffenceSeverity;
 
-use crate::historical as pallet_session_historical;
+#[cfg(feature = "try-runtime")]
+use sp_runtime::TryRuntimeError;
 
-const LOG_TARGET: &str = "runtime::session_historical";
+#[cfg(feature = "try-runtime")]
+use frame_support::ensure;
+use frame_support::migrations::VersionedMigration;
 
-const OLD_PREFIX: &str = "Session";
+/// This is the storage getting migrated.
+#[frame_support::storage_alias]
+type DisabledValidators<T: Config> = StorageValue<Pallet<T>, Vec<u32>, ValueQuery>;
 
-/// Migrate the entire storage of this pallet to a new prefix.
-///
-/// This new prefix must be the same as the one set in construct_runtime.
-///
-/// The migration will look into the storage version in order not to trigger a migration on an up
-/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
-/// migration.
-pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
-) -> Weight {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
+pub trait MigrateDisabledValidators {
+	/// Peek the list of disabled validators and their offence severity.
+	#[cfg(feature = "try-runtime")]
+	fn peek_disabled() -> Vec<(u32, OffenceSeverity)>;
 
-	if new_pallet_name == OLD_PREFIX {
-		log::info!(
-			target: LOG_TARGET,
-			"New pallet name is equal to the old prefix. No migration needs to be done.",
-		);
-		return Weight::zero()
-	}
+	/// Return the list of disabled validators and their offence severity, removing them from the
+	/// underlying storage.
+	fn take_disabled() -> Vec<(u32, OffenceSeverity)>;
+}
 
-	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
-	log::info!(
-		target: LOG_TARGET,
-		"Running migration to v1 for session_historical with storage version {:?}",
-		on_chain_storage_version,
-	);
-
-	if on_chain_storage_version < 1 {
-		let storage_prefix = pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-		frame_support::storage::migration::move_storage_from_pallet(
-			storage_prefix,
-			OLD_PREFIX.as_bytes(),
-			new_pallet_name.as_bytes(),
-		);
-		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+pub struct InitOffenceSeverity<T>(core::marker::PhantomData<T>);
+impl<T: Config> MigrateDisabledValidators for InitOffenceSeverity<T> {
+	#[cfg(feature = "try-runtime")]
+	fn peek_disabled() -> Vec<(u32, OffenceSeverity)> {
+		DisabledValidators::<T>::get()
+			.iter()
+			.map(|v| (*v, OffenceSeverity(Perbill::zero())))
+			.collect::<Vec<_>>()
+	}
 
-		let storage_prefix = pallet_session_historical::StoredRange::<T>::storage_prefix();
-		frame_support::storage::migration::move_storage_from_pallet(
-			storage_prefix,
-			OLD_PREFIX.as_bytes(),
-			new_pallet_name.as_bytes(),
-		);
-		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
-
-		StorageVersion::new(1).put::<P>();
-		<T as frame_system::Config>::BlockWeights::get().max_block
-	} else {
-		log::warn!(
-			target: LOG_TARGET,
-			"Attempted to apply migration to v1 but failed because storage version is {:?}",
-			on_chain_storage_version,
-		);
-		Weight::zero()
+	fn take_disabled() -> Vec<(u32, OffenceSeverity)> {
+		DisabledValidators::<T>::take()
+			.iter()
+			.map(|v| (*v, OffenceSeverity(Perbill::zero())))
+			.collect::<Vec<_>>()
 	}
 }
-
-/// Some checks prior to migration. This can be linked to
-/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing.
-///
-/// Panics if anything goes wrong.
-pub fn pre_migrate<
-	T: pallet_session_historical::Config,
-	P: GetStorageVersion + PalletInfoAccess,
->() {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
-
-	let storage_prefix_historical_sessions =
-		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
-
-	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
-	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
-
-	if new_pallet_name == OLD_PREFIX {
-		return
+pub struct VersionUncheckedMigrateV0ToV1<T, S: MigrateDisabledValidators>(
+	core::marker::PhantomData<(T, S)>,
+);
+
+impl<T: Config, S: MigrateDisabledValidators> UncheckedOnRuntimeUpgrade
+	for VersionUncheckedMigrateV0ToV1<T, S>
+{
+	fn on_runtime_upgrade() -> Weight {
+		let disabled = S::take_disabled();
+		NewDisabledValidators::<T>::put(disabled);
+
+		T::DbWeight::get().reads_writes(1, 1)
 	}
 
-	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
-	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
+		let source_disabled = S::peek_disabled().iter().map(|(v, _s)| *v).collect::<Vec<_>>();
+		let existing_disabled = DisabledValidators::<T>::get();
 
-	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
-		new_pallet_prefix.to_vec(),
-		new_pallet_prefix.to_vec(),
-		|key| Ok(key.to_vec()),
-	);
+		ensure!(source_disabled == existing_disabled, "Disabled validators mismatch");
+		ensure!(
+			NewDisabledValidators::<T>::get().len() == crate::Validators::<T>::get().len(),
+			"Disabled validators mismatch"
+		);
+		Ok(Vec::new())
+	}
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
+		let validators_max_index = crate::Validators::<T>::get().len() as u32 - 1;
 
-	// Ensure nothing except the storage_version_key is stored in the new prefix.
-	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+		for (v, _s) in NewDisabledValidators::<T>::get() {
+			ensure!(v <= validators_max_index, "Disabled validator index out of bounds");
+		}
 
-	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
-}
-
-/// Some checks for after migration. This can be linked to
-/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing.
-///
-/// Panics if anything goes wrong.
-pub fn post_migrate<
-	T: pallet_session_historical::Config,
-	P: GetStorageVersion + PalletInfoAccess,
->() {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
-
-	let storage_prefix_historical_sessions =
-		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
-
-	log_migration(
-		"post-migration",
-		storage_prefix_historical_sessions,
-		OLD_PREFIX,
-		new_pallet_name,
-	);
-	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
-
-	if new_pallet_name == OLD_PREFIX {
-		return
+		Ok(())
 	}
-
-	// Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix.
-	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
-	let old_historical_sessions_key =
-		[&old_pallet_prefix, &twox_128(storage_prefix_historical_sessions)[..]].concat();
-	let old_historical_sessions_key_iter = frame_support::storage::KeyPrefixIterator::new(
-		old_historical_sessions_key.to_vec(),
-		old_historical_sessions_key.to_vec(),
-		|_| Ok(()),
-	);
-	assert_eq!(old_historical_sessions_key_iter.count(), 0);
-
-	let old_stored_range_key =
-		[&old_pallet_prefix, &twox_128(storage_prefix_stored_range)[..]].concat();
-	let old_stored_range_key_iter = frame_support::storage::KeyPrefixIterator::new(
-		old_stored_range_key.to_vec(),
-		old_stored_range_key.to_vec(),
-		|_| Ok(()),
-	);
-	assert_eq!(old_stored_range_key_iter.count(), 0);
-
-	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
-	// moved to the new prefix.
-	// NOTE: storage_version_key is already in the new prefix.
-	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
-	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
-		new_pallet_prefix.to_vec(),
-		new_pallet_prefix.to_vec(),
-		|_| Ok(()),
-	);
-	assert!(new_pallet_prefix_iter.count() >= 1);
-
-	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
 }
 
-fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
-	log::info!(
-		target: LOG_TARGET,
-		"{} prefix of storage '{}': '{}' ==> '{}'",
-		stage,
-		str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>"),
-		old_pallet_name,
-		new_pallet_name,
-	);
-}
+pub type MigrateV0ToV1<T, S> = VersionedMigration<
+	0,
+	1,
+	VersionUncheckedMigrateV0ToV1<T, S>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs
index 745b57d1be41fa13c65c1494d26f2b685666270c..ac8f9d320d75a8baaf06cc38f0cc0646decc28f6 100644
--- a/substrate/frame/session/src/mock.rs
+++ b/substrate/frame/session/src/mock.rs
@@ -248,6 +248,10 @@ impl Convert<u64, Option<u64>> for TestValidatorIdOf {
 	}
 }
 
+// Disabling threshold for `UpToLimitDisablingStrategy` and
+// `UpToLimitWithReEnablingDisablingStrategy`
+pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3;
+
 impl Config for Test {
 	type ShouldEndSession = TestShouldEndSession;
 	#[cfg(feature = "historical")]
@@ -260,6 +264,8 @@ impl Config for Test {
 	type Keys = MockSessionKeys;
 	type RuntimeEvent = RuntimeEvent;
 	type NextSessionRotation = ();
+	type DisablingStrategy =
+		disabling::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/session/src/tests.rs b/substrate/frame/session/src/tests.rs
index f392c2ab7663c73d4b17443d7f9918ca410eceb8..42aeb8e14c36447b8a6fb1421bd9a78a68b59138 100644
--- a/substrate/frame/session/src/tests.rs
+++ b/substrate/frame/session/src/tests.rs
@@ -477,8 +477,8 @@ fn test_migration_v1() {
 		);
 		StorageVersion::new(0).put::<Historical>();
 
-		crate::migrations::v1::pre_migrate::<Test, Historical>();
-		crate::migrations::v1::migrate::<Test, Historical>();
-		crate::migrations::v1::post_migrate::<Test, Historical>();
+		crate::migrations::historical::pre_migrate::<Test, Historical>();
+		crate::migrations::historical::migrate::<Test, Historical>();
+		crate::migrations::historical::post_migrate::<Test, Historical>();
 	});
 }
diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs
index 39aa6bf3566b2950248254fea242154a1ccebfb7..577c1a71f89d7c80cec732ccae7553b77667c238 100644
--- a/substrate/frame/society/src/lib.rs
+++ b/substrate/frame/society/src/lib.rs
@@ -463,7 +463,18 @@ pub struct IntakeRecord<AccountId, Balance> {
 pub type IntakeRecordFor<T, I> =
 	IntakeRecord<<T as frame_system::Config>::AccountId, BalanceOf<T, I>>;
 
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct GroupParams<Balance> {
 	max_members: u32,
 	max_intake: u32,
@@ -658,7 +669,7 @@ pub mod pallet {
 
 	/// The max number of members for the society at one time.
 	#[pallet::storage]
-	pub(super) type Parameters<T: Config<I>, I: 'static = ()> =
+	pub type Parameters<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, GroupParamsFor<T, I>, OptionQuery>;
 
 	/// Amount of our account balance that is specifically for the next round's bid(s).
@@ -709,7 +720,7 @@ pub mod pallet {
 
 	/// The current bids, stored ordered by the value of the bid.
 	#[pallet::storage]
-	pub(super) type Bids<T: Config<I>, I: 'static = ()> =
+	pub type Bids<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, BoundedVec<Bid<T::AccountId, BalanceOf<T, I>>, T::MaxBids>, ValueQuery>;
 
 	#[pallet::storage]
@@ -727,7 +738,7 @@ pub mod pallet {
 
 	/// Double map from Candidate -> Voter -> (Maybe) Vote.
 	#[pallet::storage]
-	pub(super) type Votes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Votes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Twox64Concat,
 		T::AccountId,
@@ -739,7 +750,7 @@ pub mod pallet {
 
 	/// Clear-cursor for Vote, map from Candidate -> (Maybe) Cursor.
 	#[pallet::storage]
-	pub(super) type VoteClearCursor<T: Config<I>, I: 'static = ()> =
+	pub type VoteClearCursor<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, BoundedVec<u8, KeyLenOf<Votes<T, I>>>>;
 
 	/// At the end of the claim period, this contains the most recently approved members (along with
@@ -751,17 +762,17 @@ pub mod pallet {
 
 	/// The number of challenge rounds there have been. Used to identify stale DefenderVotes.
 	#[pallet::storage]
-	pub(super) type ChallengeRoundCount<T: Config<I>, I: 'static = ()> =
+	pub type ChallengeRoundCount<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, RoundIndex, ValueQuery>;
 
 	/// The defending member currently being challenged, along with a running tally of votes.
 	#[pallet::storage]
-	pub(super) type Defending<T: Config<I>, I: 'static = ()> =
+	pub type Defending<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, (T::AccountId, T::AccountId, Tally)>;
 
 	/// Votes for the defender, keyed by challenge round.
 	#[pallet::storage]
-	pub(super) type DefenderVotes<T: Config<I>, I: 'static = ()> =
+	pub type DefenderVotes<T: Config<I>, I: 'static = ()> =
 		StorageDoubleMap<_, Twox64Concat, RoundIndex, Twox64Concat, T::AccountId, Vote>;
 
 	#[pallet::hooks]
diff --git a/substrate/frame/staking/ah-client/Cargo.toml b/substrate/frame/staking/ah-client/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..4c41380e48eddc7cc29cf429eb43fb76da948436
--- /dev/null
+++ b/substrate/frame/staking/ah-client/Cargo.toml
@@ -0,0 +1,66 @@
+[package]
+name = "pallet-staking-ah-client"
+description = "Pallet handling the communication with staking-rc-client. Its role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way."
+license = "Apache-2.0"
+version = "0.1.0"
+edition.workspace = true
+authors.workspace = true
+repository.workspace = true
+
+[dependencies]
+codec = { workspace = true, features = ["derive"] }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+log = { workspace = true }
+pallet-authorship = { workspace = true }
+pallet-session = { features = ["historical"], workspace = true }
+pallet-staking = { workspace = true }
+pallet-staking-rc-client = { workspace = true }
+polkadot-primitives = { workspace = true }
+polkadot-runtime-parachains = { workspace = true }
+scale-info = { workspace = true, features = ["derive"] }
+sp-core = { workspace = true }
+sp-runtime = { workspace = true }
+sp-staking = { workspace = true }
+xcm = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"pallet-authorship/std",
+	"pallet-session/std",
+	"pallet-staking-rc-client/std",
+	"pallet-staking/std",
+	"polkadot-primitives/std",
+	"polkadot-runtime-parachains/std",
+	"scale-info/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-staking/std",
+	"xcm/std",
+]
+runtime-benchmarks = [
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"pallet-staking-rc-client/runtime-benchmarks",
+	"pallet-staking/runtime-benchmarks",
+	"polkadot-primitives/runtime-benchmarks",
+	"polkadot-runtime-parachains/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+	"sp-staking/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"pallet-authorship/try-runtime",
+	"pallet-session/try-runtime",
+	"pallet-staking-rc-client/try-runtime",
+	"pallet-staking/try-runtime",
+	"polkadot-runtime-parachains/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/staking/ah-client/src/lib.rs b/substrate/frame/staking/ah-client/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..88aee9ee3e9da3f7ce327088d56b29ccd2464643
--- /dev/null
+++ b/substrate/frame/staking/ah-client/src/lib.rs
@@ -0,0 +1,322 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This pallet is intended to be used on a relay chain and to communicate with its counterpart on
+//! AssetHub (or a similar network) named `pallet-staking-rc-client`.
+//!
+//! This pallet serves as an interface between the staking pallet on AssetHub and the session pallet
+//! on the relay chain. From the relay chain to AssetHub, its responsibilities are to send
+//! information about session changes (start and end) and to report offenses. From AssetHub to the
+//! relay chain, it receives information about the potentially new validator set for the session.
+//!
+//! All the communication between the two pallets is performed with XCM messages.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use frame_support::pallet_prelude::*;
+use pallet_staking_rc_client::Offence;
+use sp_core::crypto::AccountId32;
+use sp_runtime::traits::Convert;
+use sp_staking::{offence::OffenceDetails, Exposure, SessionIndex};
+use xcm::prelude::*;
+
+const LOG_TARGET: &str = "runtime::staking::ah-client";
+
+/// `pallet-staking-rc-client` pallet index on AssetHub. Used to construct remote calls.
+///
+/// The codec index must correspond to the index of `pallet-staking-rc-client` in the
+/// `construct_runtime` of AssetHub.
+#[derive(Encode, Decode)]
+enum AssetHubRuntimePallets {
+	#[codec(index = 50)]
+	RcClient(StakingCalls),
+}
+
+/// Call encoding for the calls needed from the rc-client pallet.
+#[derive(Encode, Decode)]
+enum StakingCalls {
+	/// A session with the given index has started.
+	#[codec(index = 0)]
+	RelayChainSessionStart(SessionIndex),
+	/// A session with the given index has ended. The block authors with their corresponding
+	/// session points are provided.
+	#[codec(index = 1)]
+	RelayChainSessionEnd(SessionIndex, Vec<(AccountId32, u32)>),
+	/// Report one or more offences.
+	#[codec(index = 2)]
+	NewRelayChainOffences(SessionIndex, Vec<Offence>),
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+	use crate::*;
+	use alloc::vec;
+	use core::result;
+	use frame_system::pallet_prelude::*;
+	use pallet_session::historical;
+	use pallet_staking::ExposureOf;
+	use polkadot_primitives::Id as ParaId;
+	use polkadot_runtime_parachains::origin::{ensure_parachain, Origin};
+	use sp_runtime::Perbill;
+	use sp_staking::{offence::OnOffenceHandler, SessionIndex};
+
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+	/// The balance type of this pallet.
+	pub type BalanceOf<T> = <T as Config>::CurrencyBalance;
+
+	// `Exposure<T::AccountId, BalanceOf<T>>` will be removed. This type alias exists only to
+	// suppress clippy warnings.
+	type ElectedValidatorSet<T> = Vec<(
+		<T as frame_system::Config>::AccountId,
+		Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+	)>;
+
+	#[pallet::pallet]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T>(_);
+
+	// TODO: should contain some initial state, otherwise starting from genesis won't work
+	#[pallet::storage]
+	pub type ValidatorSet<T: Config> = StorageValue<_, Option<ElectedValidatorSet<T>>, ValueQuery>;
+
+	/// Keeps track of the session points for each block author in the current session.
+	#[pallet::storage]
+	pub type BlockAuthors<T: Config> = StorageMap<_, Twox64Concat, AccountId32, u32, ValueQuery>;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type RuntimeOrigin: From<<Self as frame_system::Config>::RuntimeOrigin>
+			+ Into<result::Result<Origin, <Self as Config>::RuntimeOrigin>>;
+		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
+		/// `From<u64>`.
+		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
+			+ codec::FullCodec
+			+ Copy
+			+ MaybeSerializeDeserialize
+			+ core::fmt::Debug
+			+ Default
+			+ From<u64>
+			+ TypeInfo
+			+ Send
+			+ Sync
+			+ MaxEncodedLen;
+		/// The ParaId of the AssetHub.
+		#[pallet::constant]
+		type AssetHubId: Get<u32>;
+		/// The XCM sender.
+		type SendXcm: SendXcm;
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The ParaId making the call is not AssetHub.
+		NotAssetHub,
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		#[pallet::call_index(0)]
+		// #[pallet::weight(T::WeightInfo::new_validators())] // TODO
+		pub fn new_validator_set(
+			origin: OriginFor<T>,
+			new_validator_set: ElectedValidatorSet<T>,
+		) -> DispatchResult {
+			// Ignore requests not coming from the AssetHub or root.
+			Self::ensure_root_or_para(origin, <T as Config>::AssetHubId::get().into())?;
+
+			// Save the validator set. We don't care if there is a validator set which was not used.
+			ValidatorSet::<T>::put(Some(new_validator_set));
+
+			Ok(())
+		}
+	}
+
+	impl<T: Config> historical::SessionManager<T::AccountId, Exposure<T::AccountId, BalanceOf<T>>>
+		for Pallet<T>
+	{
+		fn new_session(_: sp_staking::SessionIndex) -> Option<ElectedValidatorSet<T>> {
+			// If there is a new validator set - return it. Otherwise return `None`.
+			ValidatorSet::<T>::take()
+		}
+
+		fn new_session_genesis(
+			_: SessionIndex,
+		) -> Option<Vec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>)>> {
+			ValidatorSet::<T>::take()
+		}
+
+		fn start_session(start_index: SessionIndex) {
+			<Self as pallet_session::SessionManager<_>>::start_session(start_index)
+		}
+
+		fn end_session(end_index: SessionIndex) {
+			<Self as pallet_session::SessionManager<_>>::end_session(end_index)
+		}
+	}
+
+	impl<T: Config> pallet_session::SessionManager<T::AccountId> for Pallet<T> {
+		fn new_session(_: u32) -> Option<Vec<<T as frame_system::Config>::AccountId>> {
+			// Doesn't do anything because all the logic is handled in `historical::SessionManager`
+			// implementation
+			defensive!("new_session should not be called");
+			None
+		}
+
+		fn end_session(session_index: u32) {
+			let authors = BlockAuthors::<T>::iter().collect::<Vec<_>>();
+			// The maximum number of block authors is `num_cores * max_validators_per_core` (both
+			// are parameters from [`SchedulerParams`]).
+			let _ = BlockAuthors::<T>::clear(u32::MAX, None);
+
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::RelayChainSessionEnd(session_index, authors)),
+			]);
+
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `RelayChainSessionEnd` to AssetHub failed: {:?}", err);
+			}
+		}
+
+		fn start_session(session_index: u32) {
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::RelayChainSessionStart(session_index)),
+			]);
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `RelayChainSessionStart` to AssetHub failed: {:?}", err);
+			}
+		}
+	}
+
+	impl<T> pallet_authorship::EventHandler<T::AccountId, BlockNumberFor<T>> for Pallet<T>
+	where
+		T: Config + pallet_authorship::Config + pallet_session::Config + Config,
+		T::AccountId: Into<AccountId32>,
+	{
+		// Notes the authored block in `BlockAuthors`.
+		fn note_author(author: T::AccountId) {
+			BlockAuthors::<T>::mutate(author.into(), |block_count| {
+				*block_count += 1;
+			});
+		}
+	}
+
+	impl<T: Config>
+		OnOffenceHandler<T::AccountId, pallet_session::historical::IdentificationTuple<T>, Weight>
+		for Pallet<T>
+	where
+		T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
+		T: pallet_session::historical::Config<
+			FullIdentification = Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+			FullIdentificationOf = ExposureOf<T>,
+		>,
+		T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
+		T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
+		T::ValidatorIdOf: Convert<
+			<T as frame_system::Config>::AccountId,
+			Option<<T as frame_system::Config>::AccountId>,
+		>,
+		T::AccountId: Into<AccountId32>,
+	{
+		fn on_offence(
+			offenders: &[OffenceDetails<
+				T::AccountId,
+				pallet_session::historical::IdentificationTuple<T>,
+			>],
+			slash_fraction: &[Perbill],
+			slash_session: SessionIndex,
+		) -> Weight {
+			let offenders_and_slashes = offenders
+				.iter()
+				.cloned()
+				.zip(slash_fraction)
+				.map(|(offence, fraction)| {
+					Offence::new(
+						offence.offender.0.into(),
+						offence.reporters.into_iter().map(|r| r.into()).collect(),
+						*fraction,
+					)
+				})
+				.collect::<Vec<_>>();
+
+			// send the offender immediately over xcm
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::NewRelayChainOffences(
+					slash_session,
+					offenders_and_slashes,
+				)),
+			]);
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `NewRelayChainOffences` to AssetHub failed: {:?}",
+			err);
+			}
+
+			Weight::zero()
+		}
+	}
+
+	impl<T: Config> Pallet<T> {
+		/// Ensure the origin is one of Root or the `para` itself.
+		fn ensure_root_or_para(
+			origin: <T as frame_system::Config>::RuntimeOrigin,
+			id: ParaId,
+		) -> DispatchResult {
+			if let Ok(caller_id) =
+				ensure_parachain(<T as Config>::RuntimeOrigin::from(origin.clone()))
+			{
+				// Check if matching para id...
+				ensure!(caller_id == id, Error::<T>::NotAssetHub);
+			} else {
+				// Check if root...
+				ensure_root(origin.clone())?;
+			}
+			Ok(())
+		}
+	}
+
+	fn mk_asset_hub_call(call: StakingCalls) -> Instruction<()> {
+		Instruction::Transact {
+			origin_kind: OriginKind::Superuser,
+			fallback_max_weight: None,
+			call: AssetHubRuntimePallets::RcClient(call).encode().into(),
+		}
+	}
+}
diff --git a/substrate/frame/staking/rc-client/Cargo.toml b/substrate/frame/staking/rc-client/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5498cae777e12c2eea09eaae2db9828afad21196
--- /dev/null
+++ b/substrate/frame/staking/rc-client/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "pallet-staking-rc-client"
+description = "Pallet handling the communication with staking-ah-client. Its role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way."
+license = "Apache-2.0"
+version = "0.1.0"
+edition.workspace = true
+authors.workspace = true
+repository.workspace = true
+
+[dependencies]
+codec = { workspace = true, features = ["derive"] }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+log = { workspace = true }
+scale-info = { workspace = true, features = ["derive"] }
+sp-core = { workspace = true }
+sp-runtime = { features = ["serde"], workspace = true }
+sp-staking = { features = ["serde"], workspace = true }
+xcm = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-staking/std",
+	"xcm/std",
+]
+runtime-benchmarks = [
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+	"sp-staking/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/staking/rc-client/src/lib.rs b/substrate/frame/staking/rc-client/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dc6c0b7e5c6fc4d8fc9c9e7196cf9b76890451d9
--- /dev/null
+++ b/substrate/frame/staking/rc-client/src/lib.rs
@@ -0,0 +1,181 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This pallet is intended to be used on AssetHub. It provides extrinsics used by
+//! `pallet-staking-ah-client` and serves as an interface between the relay chain and the staking
+//! pallet on AssetHub.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use frame_support::pallet_prelude::*;
+use sp_core::crypto::AccountId32;
+use sp_runtime::Perbill;
+use sp_staking::SessionIndex;
+use xcm::prelude::*;
+
+const LOG_TARGET: &str = "runtime::staking::rc-client";
+
+/// Provides the pallet with a validator set produced by an election or other similar mechanism.
+pub trait ElectionResultHandler<ValidatorId> {
+	fn handle_election_result(result: Vec<ValidatorId>);
+}
+
+/// API provided by the staking pallet.
+pub trait StakingApi {
+	/// New session with index `start_index` has started on the relay chain.
+	fn on_relay_chain_session_start(start_index: SessionIndex);
+	/// A session with index `end_index` has ended on the relay chain. The block authors and their
+	/// corresponding session points are reported.
+	fn on_relay_chain_session_end(end_index: SessionIndex, block_authors: Vec<(AccountId32, u32)>);
+	/// Report one or more offences on the relay chain.
+	fn on_new_offences(offences: Vec<Offence>);
+}
+
+/// `pallet-staking-ah-client` pallet index on Relay chain. Used to construct remote calls.
+///
+/// The codec index must correspond to the index of `pallet-staking-ah-client` in the
+/// `construct_runtime` of the Relay chain.
+#[derive(Encode, Decode)]
+enum RelayChainRuntimePallets {
+	#[codec(index = 50)]
+	AhClient(SessionCalls),
+}
+
+/// Call encoding for the calls needed from the pallet.
+#[derive(Encode, Decode)]
+enum SessionCalls {
+	#[codec(index = 0)]
+	NewValidatorSet(Vec<AccountId32>),
+}
+
+/// An offence on the relay chain. Based on [`sp_staking::offence::OffenceDetails`].
+#[derive(Encode, Decode, Debug, Clone, PartialEq, TypeInfo)]
+pub struct Offence {
+	offender: AccountId32,
+	reporters: Vec<AccountId32>,
+	slash_fraction: Perbill,
+}
+
+impl Offence {
+	pub fn new(
+		offender: AccountId32,
+		reporters: Vec<AccountId32>,
+		slash_fraction: Perbill,
+	) -> Self {
+		Self { offender, reporters, slash_fraction }
+	}
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+	use super::*;
+	use alloc::vec;
+	use frame_system::pallet_prelude::*;
+
+	/// The in-code storage version.
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+	#[pallet::pallet]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
+		/// A stable ID for a validator.
+		type ValidatorId: Member
+			+ Parameter
+			+ MaybeSerializeDeserialize
+			+ MaxEncodedLen
+			+ TryFrom<Self::AccountId>;
+
+		/// Handler for staking calls
+		type StakingApi: StakingApi;
+		/// The XCM sender.
+		type SendXcm: SendXcm;
+	}
+
+	impl<T: Config, ValidatorId: Into<AccountId32>> ElectionResultHandler<ValidatorId> for Pallet<T> {
+		fn handle_election_result(result: Vec<ValidatorId>) {
+			let new_validator_set = result.into_iter().map(Into::into).collect::<Vec<_>>();
+
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_relay_chain_call(SessionCalls::NewValidatorSet(new_validator_set)),
+			]);
+
+			if let Err(err) = send_xcm::<T::SendXcm>(Location::new(1, Here), message) {
+				log::error!(target: LOG_TARGET, "Sending `NewValidators` to relay chain failed: {:?}", err);
+			}
+		}
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Called to indicate the start of a new session on the relay chain.
+		#[pallet::call_index(0)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn relay_chain_session_start(
+			origin: OriginFor<T>,
+			start_index: SessionIndex,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_relay_chain_session_start(start_index);
+			Ok(())
+		}
+
+		/// Called to indicate the end of a session on the relay chain. Accepts the session id and
+		/// the block authors with their corresponding session points for the finished session.
+		#[pallet::call_index(1)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn relay_chain_session_end(
+			origin: OriginFor<T>,
+			end_index: SessionIndex,
+			block_authors: Vec<(AccountId32, u32)>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_relay_chain_session_end(end_index, block_authors);
+			Ok(())
+		}
+
+		/// Called to report one or more new offences on the relay chain.
+		#[pallet::call_index(2)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn new_relay_chain_offence(
+			origin: OriginFor<T>,
+			offences: Vec<Offence>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_new_offences(offences);
+			Ok(())
+		}
+	}
+
+	fn mk_relay_chain_call(call: SessionCalls) -> Instruction<()> {
+		Instruction::Transact {
+			origin_kind: OriginKind::Superuser,
+			fallback_max_weight: None,
+			call: RelayChainRuntimePallets::AhClient(call).encode().into(),
+		}
+	}
+}
diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs
index 43e3e13270343a5c7fd13ce10129a069a3be4cb9..f01b36934486882334b3f620ea8dc3ef55498324 100644
--- a/substrate/frame/staking/src/benchmarking.rs
+++ b/substrate/frame/staking/src/benchmarking.rs
@@ -802,21 +802,33 @@ mod benchmarks {
 
 	#[benchmark]
 	fn cancel_deferred_slash(s: Linear<1, MAX_SLASHES>) {
-		let mut unapplied_slashes = Vec::new();
 		let era = EraIndex::one();
-		let dummy = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap();
-		for _ in 0..MAX_SLASHES {
-			unapplied_slashes
-				.push(UnappliedSlash::<T::AccountId, BalanceOf<T>>::default_from(dummy()));
+		let dummy_account = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap();
+
+		// Insert `s` unapplied slashes with the new key structure
+		for i in 0..s {
+			let slash_key = (dummy_account(), Perbill::from_percent(i as u32 % 100), i);
+			let unapplied_slash = UnappliedSlash::<T> {
+				validator: slash_key.0.clone(),
+				own: Zero::zero(),
+				others: WeakBoundedVec::default(),
+				reporter: Default::default(),
+				payout: Zero::zero(),
+			};
+			UnappliedSlashes::<T>::insert(era, slash_key.clone(), unapplied_slash);
 		}
-		UnappliedSlashes::<T>::insert(era, &unapplied_slashes);
 
-		let slash_indices: Vec<u32> = (0..s).collect();
+		let slash_keys: Vec<_> = (0..s)
+			.map(|i| (dummy_account(), Perbill::from_percent(i as u32 % 100), i))
+			.collect();
 
 		#[extrinsic_call]
-		_(RawOrigin::Root, era, slash_indices);
+		_(RawOrigin::Root, era, slash_keys.clone());
 
-		assert_eq!(UnappliedSlashes::<T>::get(&era).len(), (MAX_SLASHES - s) as usize);
+		// Ensure all `s` slashes are removed
+		for key in &slash_keys {
+			assert!(UnappliedSlashes::<T>::get(era, key).is_none());
+		}
 	}
 
 	#[benchmark]
@@ -1137,6 +1149,46 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn apply_slash() -> Result<(), BenchmarkError> {
+		let era = EraIndex::one();
+		ActiveEra::<T>::put(ActiveEraInfo { index: era, start: None });
+		let (validator, nominators) = create_validator_with_nominators::<T>(
+			T::MaxExposurePageSize::get() as u32,
+			T::MaxExposurePageSize::get() as u32,
+			false,
+			true,
+			RewardDestination::Staked,
+			era,
+		)?;
+		let slash_fraction = Perbill::from_percent(10);
+		let page_index = 0;
+		let slashed_balance = BalanceOf::<T>::from(10u32);
+
+		let slash_key = (validator.clone(), slash_fraction, page_index);
+		let slashed_nominators =
+			nominators.iter().map(|(n, _)| (n.clone(), slashed_balance)).collect::<Vec<_>>();
+
+		let unapplied_slash = UnappliedSlash::<T> {
+			validator: validator.clone(),
+			own: slashed_balance,
+			others: WeakBoundedVec::force_from(slashed_nominators, None),
+			reporter: Default::default(),
+			payout: Zero::zero(),
+		};
+
+		// Insert an unapplied slash to be processed.
+		UnappliedSlashes::<T>::insert(era, slash_key.clone(), unapplied_slash);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(validator.clone()), era, slash_key.clone());
+
+		// Ensure the slash has been applied and removed.
+		assert!(UnappliedSlashes::<T>::get(era, &slash_key).is_none());
+
+		Ok(())
+	}
+
 	impl_benchmark_test_suite!(
 		Staking,
 		crate::mock::ExtBuilder::default().has_stakers(true),
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index e11a82c8ebee48c71b87c6cf27822da54fe9f8cc..a5fe16e500b8f29bbba27eb5667cde1b73a1793b 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -344,7 +344,7 @@ mod pallet;
 extern crate alloc;
 
 use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
-use codec::{Decode, Encode, HasCompact, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, HasCompact, MaxEncodedLen};
 use frame_election_provider_support::ElectionProvider;
 use frame_support::{
 	defensive, defensive_assert,
@@ -353,7 +353,7 @@ use frame_support::{
 		ConstU32, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier,
 	},
 	weights::Weight,
-	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
+	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, WeakBoundedVec,
 };
 use scale_info::TypeInfo;
 use sp_runtime::{
@@ -448,7 +448,18 @@ impl<AccountId: Ord> Default for EraRewardPoints<AccountId> {
 }
 
 /// A destination account for payment.
-#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum RewardDestination<AccountId> {
 	/// Pay into the stash account, increasing the amount at stake accordingly.
 	Staked,
@@ -465,7 +476,18 @@ pub enum RewardDestination<AccountId> {
 }
 
 /// Preference of what happens regarding validation.
-#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	TypeInfo,
+	Default,
+	MaxEncodedLen,
+)]
 pub struct ValidatorPrefs {
 	/// Reward that validator takes up-front; only the rest is split between themselves and
 	/// nominators.
@@ -478,7 +500,17 @@ pub struct ValidatorPrefs {
 }
 
 /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked.
-#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct UnlockChunk<Balance: HasCompact + MaxEncodedLen> {
 	/// Amount of funds to be unlocked.
 	#[codec(compact)]
@@ -845,7 +877,7 @@ impl<AccountId, Balance: HasCompact + Copy + AtLeast32BitUnsigned + codec::MaxEn
 
 /// A pending slash record. The value of the slash has been computed but not applied yet,
 /// rather deferred for several eras.
-#[derive(Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)]
+#[derive(Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, PartialEqNoBound)]
 #[scale_info(skip_type_params(T))]
 pub struct UnappliedSlash<T: Config> {
 	/// The stash ID of the offending validator.
@@ -853,7 +885,7 @@ pub struct UnappliedSlash<T: Config> {
 	/// The validator's own slash.
 	own: BalanceOf<T>,
 	/// All other slashed stakers and amounts.
-	others: BoundedVec<(T::AccountId, BalanceOf<T>), T::MaxExposurePageSize>,
+	others: WeakBoundedVec<(T::AccountId, BalanceOf<T>), T::MaxExposurePageSize>,
 	/// Reporters of the offence; bounty payout recipients.
 	reporter: Option<T::AccountId>,
 	/// The amount of payout.
@@ -894,12 +926,8 @@ impl<Balance, const MAX: u32> NominationsQuota<Balance> for FixedNominationsQuot
 ///
 /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config`
 pub trait SessionInterface<AccountId> {
-	/// Disable the validator at the given index, returns `false` if the validator was already
-	/// disabled or the index is out of bounds.
-	fn disable_validator(validator_index: u32) -> bool;
-	/// Re-enable a validator that was previously disabled. Returns `false` if the validator was
-	/// already enabled or the index is out of bounds.
-	fn enable_validator(validator_index: u32) -> bool;
+	/// Report an offending validator.
+	fn report_offence(validator: AccountId, severity: OffenceSeverity);
 	/// Get the validators from session.
 	fn validators() -> Vec<AccountId>;
 	/// Prune historical session tries up to but not including the given index.
@@ -909,10 +937,7 @@ pub trait SessionInterface<AccountId> {
 impl<T: Config> SessionInterface<<T as frame_system::Config>::AccountId> for T
 where
 	T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
-	T: pallet_session::historical::Config<
-		FullIdentification = <T as frame_system::Config>::AccountId,
-		FullIdentificationOf = IdentityOf<T>,
-	>,
+	T: pallet_session::historical::Config,
 	T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
 	T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
 	T::ValidatorIdOf: Convert<
@@ -920,12 +945,11 @@ where
 		Option<<T as frame_system::Config>::AccountId>,
 	>,
 {
-	fn disable_validator(validator_index: u32) -> bool {
-		<pallet_session::Pallet<T>>::disable_index(validator_index)
-	}
-
-	fn enable_validator(validator_index: u32) -> bool {
-		<pallet_session::Pallet<T>>::enable_index(validator_index)
+	fn report_offence(
+		validator: <T as frame_system::Config>::AccountId,
+		severity: OffenceSeverity,
+	) {
+		<pallet_session::Pallet<T>>::report_offence(validator, severity)
 	}
 
 	fn validators() -> Vec<<T as frame_system::Config>::AccountId> {
@@ -938,11 +962,8 @@ where
 }
 
 impl<AccountId> SessionInterface<AccountId> for () {
-	fn disable_validator(_: u32) -> bool {
-		true
-	}
-	fn enable_validator(_: u32) -> bool {
-		true
+	fn report_offence(_validator: AccountId, _severity: OffenceSeverity) {
+		()
 	}
 	fn validators() -> Vec<AccountId> {
 		Vec::new()
@@ -1008,6 +1029,7 @@ where
 	Eq,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	RuntimeDebug,
 	TypeInfo,
 	MaxEncodedLen,
@@ -1059,17 +1081,10 @@ impl<T: Config> Convert<T::AccountId, Option<Exposure<T::AccountId, BalanceOf<T>
 	}
 }
 
-pub struct IdentityOf<T>(core::marker::PhantomData<T>);
-
-impl<T: Config> Convert<T::AccountId, Option<T::AccountId>> for IdentityOf<T> {
-	fn convert(validator: T::AccountId) -> Option<T::AccountId> {
-		ActiveEra::<T>::get().and_then(|active_era| {
-			if ErasStakersOverview::<T>::contains_key(&active_era.index, &validator) {
-				Some(validator)
-			} else {
-				None
-			}
-		})
+pub struct NullIdentity;
+impl<T> Convert<T, Option<()>> for NullIdentity {
+	fn convert(_: T) -> Option<()> {
+		Some(())
 	}
 }
 
@@ -1339,200 +1354,3 @@ impl BenchmarkingConfig for TestBenchmarkingConfig {
 	type MaxValidators = frame_support::traits::ConstU32<100>;
 	type MaxNominators = frame_support::traits::ConstU32<100>;
 }
-
-/// Controls validator disabling
-pub trait DisablingStrategy<T: Config> {
-	/// Make a disabling decision. Returning a [`DisablingDecision`]
-	fn decision(
-		offender_stash: &T::AccountId,
-		offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision;
-}
-
-/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing
-/// `decision`
-///
-/// `disable` is the index of the validator to disable,
-/// `reenable` is the index of the validator to re-enable.
-#[derive(Debug)]
-pub struct DisablingDecision {
-	pub disable: Option<u32>,
-	pub reenable: Option<u32>,
-}
-
-/// Calculate the disabling limit based on the number of validators and the disabling limit factor.
-///
-/// This is a sensible default implementation for the disabling limit factor for most disabling
-/// strategies.
-///
-/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled
-fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
-	validators_len
-		.saturating_sub(1)
-		.checked_div(disabling_limit_factor)
-		.unwrap_or_else(|| {
-			defensive!("DISABLING_LIMIT_FACTOR should not be 0");
-			0
-		})
-}
-
-/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables
-/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the
-/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than
-/// 1/3 of the validators in the active set can be disabled in an era.
-///
-/// By default a factor of 3 is used which is the byzantine threshold.
-pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
-
-impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
-	/// Disabling limit calculated from the total number of validators in the active set. When
-	/// reached no more validators will be disabled.
-	pub fn disable_limit(validators_len: usize) -> usize {
-		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
-	}
-}
-
-impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
-	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	fn decision(
-		offender_stash: &T::AccountId,
-		_offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision {
-		let active_set = T::SessionInterface::validators();
-
-		// We don't disable more than the limit
-		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
-			log!(
-				debug,
-				"Won't disable: reached disabling limit {:?}",
-				Self::disable_limit(active_set.len())
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		// We don't disable for offences in previous eras
-		if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
-			log!(
-				debug,
-				"Won't disable: current_era {:?} > slash_era {:?}",
-				CurrentEra::<T>::get().unwrap_or_default(),
-				slash_era
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
-			idx as u32
-		} else {
-			log!(debug, "Won't disable: offender not in active set",);
-			return DisablingDecision { disable: None, reenable: None }
-		};
-
-		log!(debug, "Will disable {:?}", offender_idx);
-
-		DisablingDecision { disable: Some(offender_idx), reenable: None }
-	}
-}
-
-/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
-/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher
-/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new
-/// offender.
-///
-/// This strategy is not based on cumulative severity of offences but only on the severity of the
-/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated
-/// the same as an offender committing 50% offence.
-///
-/// An extension of [`UpToLimitDisablingStrategy`].
-pub struct UpToLimitWithReEnablingDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
-
-impl<const DISABLING_LIMIT_FACTOR: usize>
-	UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	/// Disabling limit calculated from the total number of validators in the active set. When
-	/// reached re-enabling logic might kick in.
-	pub fn disable_limit(validators_len: usize) -> usize {
-		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
-	}
-}
-
-impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
-	for UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	fn decision(
-		offender_stash: &T::AccountId,
-		offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision {
-		let active_set = T::SessionInterface::validators();
-
-		// We don't disable for offences in previous eras
-		if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
-			log!(
-				debug,
-				"Won't disable: current_era {:?} > slash_era {:?}",
-				Pallet::<T>::current_era().unwrap_or_default(),
-				slash_era
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		// We don't disable validators that are not in the active set
-		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
-			idx as u32
-		} else {
-			log!(debug, "Won't disable: offender not in active set",);
-			return DisablingDecision { disable: None, reenable: None }
-		};
-
-		// Check if offender is already disabled
-		if let Some((_, old_severity)) =
-			currently_disabled.iter().find(|(idx, _)| *idx == offender_idx)
-		{
-			if offender_slash_severity > *old_severity {
-				log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx);
-				return DisablingDecision { disable: Some(offender_idx), reenable: None };
-			} else {
-				log!(debug, "Offender already disabled with higher or equal severity");
-				return DisablingDecision { disable: None, reenable: None };
-			}
-		}
-
-		// We don't disable more than the limit (but we can re-enable a smaller offender to make
-		// space)
-		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
-			log!(
-				debug,
-				"Reached disabling limit {:?}, checking for re-enabling",
-				Self::disable_limit(active_set.len())
-			);
-
-			// Find the smallest offender to re-enable that is not higher than
-			// offender_slash_severity
-			if let Some((smallest_idx, _)) = currently_disabled
-				.iter()
-				.filter(|(_, severity)| *severity <= offender_slash_severity)
-				.min_by_key(|(_, severity)| *severity)
-			{
-				log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx);
-				return DisablingDecision {
-					disable: Some(offender_idx),
-					reenable: Some(*smallest_idx),
-				}
-			} else {
-				log!(debug, "No smaller offender found to re-enable");
-				return DisablingDecision { disable: None, reenable: None }
-			}
-		} else {
-			// If we are not at the limit, just disable the new offender and dont re-enable anyone
-			log!(debug, "Will disable {:?}", offender_idx);
-			return DisablingDecision { disable: Some(offender_idx), reenable: None }
-		}
-	}
-}
diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs
index 2cc0c77044e18582511e882f790263d42df0b8d3..5b0118da67ef72c1f16c34d2d700af85591b5d08 100644
--- a/substrate/frame/staking/src/migrations.rs
+++ b/substrate/frame/staking/src/migrations.rs
@@ -23,6 +23,7 @@ use frame_support::{
 	pallet_prelude::ValueQuery,
 	storage_alias,
 	traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade},
+	Twox64Concat,
 };
 
 #[cfg(feature = "try-runtime")]
@@ -55,53 +56,80 @@ impl Default for ObsoleteReleases {
 #[storage_alias]
 type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
 
-/// Migrates to multi-page election support.
-///
-/// See: <https://github.com/paritytech/polkadot-sdk/pull/6034>
-///
-/// Important note: this migration should be released with the election provider configured by this
-/// pallet supporting up to 1 page. Thus,
-/// * `VoterSnapshotStatus` does not need migration, as it will always be `Status::Waiting` when
-/// the number of election pages is 1.
-/// * `ElectableStashes` must be populated iif there are collected exposures for a future era (i.e.
-/// exposures have been collected but `fn try_plan_new_era` was not called).
+/// Migrates `UnappliedSlashes` to a new storage structure to support paged slashing.
+/// This ensures that slashing can be processed in batches, preventing large storage operations in a
+/// single block.
 pub mod v17 {
 	use super::*;
 
-	pub struct VersionedMigrateV16ToV17<T>(core::marker::PhantomData<T>);
-	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionedMigrateV16ToV17<T> {
+	#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)]
+	struct OldUnappliedSlash<T: Config> {
+		validator: T::AccountId,
+		/// The validator's own slash.
+		own: BalanceOf<T>,
+		/// All other slashed stakers and amounts.
+		others: Vec<(T::AccountId, BalanceOf<T>)>,
+		/// Reporters of the offence; bounty payout recipients.
+		reporters: Vec<T::AccountId>,
+		/// The amount of payout.
+		payout: BalanceOf<T>,
+	}
+
+	#[frame_support::storage_alias]
+	pub type OldUnappliedSlashes<T: Config> =
+		StorageMap<Pallet<T>, Twox64Concat, EraIndex, Vec<OldUnappliedSlash<T>>, ValueQuery>;
+
+	#[frame_support::storage_alias]
+	pub type DisabledValidators<T: Config> =
+		StorageValue<Pallet<T>, BoundedVec<(u32, OffenceSeverity), ConstU32<100>>, ValueQuery>;
+
+	pub struct VersionUncheckedMigrateV16ToV17<T>(core::marker::PhantomData<T>);
+	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV16ToV17<T> {
 		fn on_runtime_upgrade() -> Weight {
-			// Populates the `ElectableStashes` with the exposures of the next planning era if it
-			// is initialized (i.e. if the there are exposures collected for the next planning
-			// era).
+			let mut weight: Weight = Weight::zero();
+
+			OldUnappliedSlashes::<T>::drain().for_each(|(era, slashes)| {
+				weight.saturating_accrue(T::DbWeight::get().reads(1));
+
+				for slash in slashes {
+					let validator = slash.validator.clone();
+					let new_slash = UnappliedSlash {
+						validator: validator.clone(),
+						own: slash.own,
+						others: WeakBoundedVec::force_from(slash.others, None),
+						payout: slash.payout,
+						reporter: slash.reporters.first().cloned(),
+					};
+
+					// Create a slash key that is unlikely to conflict with a new offence.
+					let slash_key = (validator, Perbill::from_percent(99), 9999);
+					UnappliedSlashes::<T>::insert(era, slash_key, new_slash);
+					weight.saturating_accrue(T::DbWeight::get().writes(1));
+				}
+			});
 
-			// note: we expect the migration to be released with a single page config.
-			debug_assert!(Pallet::<T>::election_pages() == 1);
+			weight
+		}
 
-			let next_era = CurrentEra::<T>::get().defensive_unwrap_or_default().saturating_add(1);
-			let prepared_exposures = ErasStakersOverview::<T>::iter_prefix(next_era)
-				.map(|(v, _)| v)
-				.collect::<Vec<_>>();
-			let migrated_stashes = prepared_exposures.len() as u32;
-
-			let result = Pallet::<T>::add_electables(prepared_exposures.into_iter());
-			debug_assert!(result.is_ok());
-
-			log!(info, "v17 applied successfully, migrated {:?}.", migrated_stashes);
-			T::DbWeight::get().reads_writes(
-				// 1x read per history depth and current era read.
-				(T::HistoryDepth::get() + 1u32).into(),
-				// 1x write per exposure migrated.
-				migrated_stashes.into(),
-			)
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			let mut expected_slashes: u32 = 0;
+			OldUnappliedSlashes::<T>::iter().for_each(|(_, slashes)| {
+				expected_slashes += slashes.len() as u32;
+			});
+
+			Ok(expected_slashes.encode())
 		}
 
 		#[cfg(feature = "try-runtime")]
-		fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
-			frame_support::ensure!(
-				Pallet::<T>::on_chain_storage_version() >= 17,
-				"v17 not applied"
-			);
+		fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
+			let expected_slash_count =
+				u32::decode(&mut state.as_slice()).expect("Failed to decode state");
+
+			let actual_slash_count = UnappliedSlashes::<T>::iter().count() as u32;
+
+			ensure!(expected_slash_count == actual_slash_count, "Slash count mismatch");
+
 			Ok(())
 		}
 	}
@@ -109,10 +137,24 @@ pub mod v17 {
 	pub type MigrateV16ToV17<T> = VersionedMigration<
 		16,
 		17,
-		VersionedMigrateV16ToV17<T>,
+		VersionUncheckedMigrateV16ToV17<T>,
 		Pallet<T>,
 		<T as frame_system::Config>::DbWeight,
 	>;
+
+	pub struct MigrateDisabledToSession<T>(core::marker::PhantomData<T>);
+	impl<T: Config> pallet_session::migrations::v1::MigrateDisabledValidators
+		for MigrateDisabledToSession<T>
+	{
+		#[cfg(feature = "try-runtime")]
+		fn peek_disabled() -> Vec<(u32, OffenceSeverity)> {
+			DisabledValidators::<T>::get().into()
+		}
+
+		fn take_disabled() -> Vec<(u32, OffenceSeverity)> {
+			DisabledValidators::<T>::take().into()
+		}
+	}
 }
 
 /// Migrating `DisabledValidators` from `Vec<u32>` to `Vec<(u32, OffenceSeverity)>` to track offense
@@ -183,7 +225,7 @@ pub mod v16 {
 			// Decode state to get old_disabled_validators in a format of Vec<u32>
 			let old_disabled_validators =
 				Vec::<u32>::decode(&mut state.as_slice()).expect("Failed to decode state");
-			let new_disabled_validators = DisabledValidators::<T>::get();
+			let new_disabled_validators = v17::DisabledValidators::<T>::get();
 
 			// Compare lengths
 			frame_support::ensure!(
@@ -201,7 +243,7 @@ pub mod v16 {
 
 			// Verify severity
 			let max_severity = OffenceSeverity(Perbill::from_percent(100));
-			let new_disabled_validators = DisabledValidators::<T>::get();
+			let new_disabled_validators = v17::DisabledValidators::<T>::get();
 			for (_, severity) in new_disabled_validators {
 				frame_support::ensure!(severity == max_severity, "Severity mismatch");
 			}
@@ -224,7 +266,7 @@ pub mod v15 {
 	use super::*;
 
 	// The disabling strategy used by staking pallet
-	type DefaultDisablingStrategy = UpToLimitDisablingStrategy;
+	type DefaultDisablingStrategy = pallet_session::disabling::UpToLimitDisablingStrategy;
 
 	#[storage_alias]
 	pub(crate) type DisabledValidators<T: Config> = StorageValue<Pallet<T>, Vec<u32>, ValueQuery>;
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index 621b106eaa87840732bda74d6d28a306ee8debc6..c366d67bfd1d56a5d58f07109978c01f517639e9 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -148,12 +148,14 @@ impl pallet_session::Config for Test {
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = crate::StashOf<Test>;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy =
+		pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = AccountId;
-	type FullIdentificationOf = IdentityOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = NullIdentity;
 }
 impl pallet_authorship::Config for Test {
 	type FindAuthor = Author11;
@@ -363,8 +365,6 @@ impl crate::pallet::pallet::Config for Test {
 	type HistoryDepth = HistoryDepth;
 	type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch;
 	type EventListeners = EventListenerMock;
-	type DisablingStrategy =
-		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
@@ -826,6 +826,7 @@ pub(crate) fn on_offence_in_era(
 	>],
 	slash_fraction: &[Perbill],
 	era: EraIndex,
+	advance_processing_blocks: bool,
 ) {
 	// counter to keep track of how many blocks we need to advance to process all the offences.
 	let mut process_blocks = 0u32;
@@ -837,7 +838,9 @@ pub(crate) fn on_offence_in_era(
 	for &(bonded_era, start_session) in bonded_eras.iter() {
 		if bonded_era == era {
 			let _ = Staking::on_offence(offenders, slash_fraction, start_session);
-			advance_blocks(process_blocks as u64);
+			if advance_processing_blocks {
+				advance_blocks(process_blocks as u64);
+			}
 			return
 		} else if bonded_era > era {
 			break
@@ -850,7 +853,9 @@ pub(crate) fn on_offence_in_era(
 			slash_fraction,
 			pallet_staking::ErasStartSessionIndex::<Test>::get(era).unwrap(),
 		);
-		advance_blocks(process_blocks as u64);
+		if advance_processing_blocks {
+			advance_blocks(process_blocks as u64);
+		}
 	} else {
 		panic!("cannot slash in era {}", era);
 	}
@@ -862,22 +867,23 @@ pub(crate) fn on_offence_now(
 		pallet_session::historical::IdentificationTuple<Test>,
 	>],
 	slash_fraction: &[Perbill],
+	advance_processing_blocks: bool,
 ) {
 	let now = pallet_staking::ActiveEra::<Test>::get().unwrap().index;
-	on_offence_in_era(offenders, slash_fraction, now);
+	on_offence_in_era(offenders, slash_fraction, now, advance_processing_blocks);
 }
 pub(crate) fn offence_from(
 	offender: AccountId,
 	reporter: Option<AccountId>,
 ) -> OffenceDetails<AccountId, pallet_session::historical::IdentificationTuple<Test>> {
 	OffenceDetails {
-		offender: (offender, offender),
+		offender: (offender, ()),
 		reporters: reporter.map(|r| vec![(r)]).unwrap_or_default(),
 	}
 }
 
 pub(crate) fn add_slash(who: &AccountId) {
-	on_offence_now(&[offence_from(*who, None)], &[Perbill::from_percent(10)]);
+	on_offence_now(&[offence_from(*who, None)], &[Perbill::from_percent(10)], true);
 }
 
 /// Make all validator and nominator request their payment
@@ -1022,6 +1028,14 @@ pub(crate) fn staking_events() -> Vec<crate::Event<Test>> {
 		.collect()
 }
 
+pub(crate) fn session_events() -> Vec<pallet_session::Event<Test>> {
+	System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::Session(inner) = e { Some(inner) } else { None })
+		.collect()
+}
+
 parameter_types! {
 	static StakingEventsIndex: usize = 0;
 }
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index ee624a57ec929047a73a78aec3cf718df1235d8c..ee58153bfbc949d8376d7b63a4e64166088367e5 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -39,7 +39,7 @@ use sp_runtime::{
 };
 use sp_staking::{
 	currency_to_vote::CurrencyToVote,
-	offence::{OffenceDetails, OnOffenceHandler},
+	offence::{OffenceDetails, OffenceSeverity, OnOffenceHandler},
 	EraIndex, OnStakingUpdate, Page, SessionIndex, Stake,
 	StakingAccount::{self, Controller, Stash},
 	StakingInterface,
@@ -47,10 +47,10 @@ use sp_staking::{
 
 use crate::{
 	asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo,
-	BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing,
-	IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf,
-	Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface,
-	SnapshotStatus, StakingLedger, ValidatorPrefs, STAKING_ID,
+	BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, Forcing, IndividualExposure,
+	LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf, Nominations,
+	NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, SnapshotStatus,
+	StakingLedger, ValidatorPrefs, STAKING_ID,
 };
 use alloc::{boxed::Box, vec, vec::Vec};
 
@@ -517,11 +517,12 @@ impl<T: Config> Pallet<T> {
 				frame_support::print("Warning: A session appears to have been skipped.");
 				Self::start_era(start_session);
 			}
-		}
 
-		// disable all offending validators that have been disabled for the whole era
-		for (index, _) in <DisabledValidators<T>>::get() {
-			T::SessionInterface::disable_validator(index);
+			// trigger election in the last session of the era
+			if start_session + 1 == next_active_era_start_session_index {
+				// TODO: trigger election
+				// Self::trigger_election();
+			}
 		}
 	}
 
@@ -609,9 +610,6 @@ impl<T: Config> Pallet<T> {
 			// Set ending era reward.
 			<ErasValidatorReward<T>>::insert(&active_era.index, validator_payout);
 			T::RewardRemainder::on_unbalanced(asset::issue::<T>(remainder));
-
-			// Clear disabled validators.
-			<DisabledValidators<T>>::kill();
 		}
 	}
 
@@ -981,7 +979,6 @@ impl<T: Config> Pallet<T> {
 
 	/// Apply previously-unapplied slashes on the beginning of a new era, after a delay.
 	pub(crate) fn apply_unapplied_slashes(active_era: EraIndex) {
-		// todo(ank4n): Make it multi block.
 		let mut slashes = UnappliedSlashes::<T>::iter_prefix(&active_era).take(1);
 		if let Some((key, slash)) = slashes.next() {
 			log!(
@@ -990,8 +987,8 @@ impl<T: Config> Pallet<T> {
 				slash,
 				active_era,
 			);
-			let slash_era = active_era.saturating_sub(T::SlashDeferDuration::get());
-			slashing::apply_slash::<T>(slash, slash_era);
+			let offence_era = active_era.saturating_sub(T::SlashDeferDuration::get());
+			slashing::apply_slash::<T>(slash, offence_era);
 			// remove the slash
 			UnappliedSlashes::<T>::remove(&active_era, &key);
 		}
@@ -1762,6 +1759,23 @@ impl<T: Config> historical::SessionManager<T::AccountId, Exposure<T::AccountId,
 	}
 }
 
+impl<T: Config> historical::SessionManager<T::AccountId, ()> for Pallet<T> {
+	fn new_session(new_index: SessionIndex) -> Option<Vec<(T::AccountId, ())>> {
+		<Self as pallet_session::SessionManager<_>>::new_session(new_index)
+			.map(|validators| validators.into_iter().map(|v| (v, ())).collect())
+	}
+	fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<(T::AccountId, ())>> {
+		<Self as pallet_session::SessionManager<_>>::new_session_genesis(new_index)
+			.map(|validators| validators.into_iter().map(|v| (v, ())).collect())
+	}
+	fn start_session(start_index: SessionIndex) {
+		<Self as pallet_session::SessionManager<_>>::start_session(start_index)
+	}
+	fn end_session(end_index: SessionIndex) {
+		<Self as pallet_session::SessionManager<_>>::end_session(end_index)
+	}
+}
+
 /// Add reward points to block authors:
 /// * 20 points to the block producer for producing a (non-uncle) block,
 impl<T> pallet_authorship::EventHandler<T::AccountId, BlockNumberFor<T>> for Pallet<T>
@@ -1779,10 +1793,7 @@ impl<T: Config>
 	for Pallet<T>
 where
 	T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
-	T: pallet_session::historical::Config<
-		FullIdentification = <T as frame_system::Config>::AccountId,
-		FullIdentificationOf = IdentityOf<T>,
-	>,
+	T: pallet_session::historical::Config,
 	T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
 	T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
 	T::ValidatorIdOf: Convert<
@@ -1793,12 +1804,8 @@ where
 	/// When an offence is reported, it is split into pages and put in the offence queue.
 	/// As offence queue is processed, computed slashes are queued to be applied after the
 	/// `SlashDeferDuration`.
-	// todo(ank4n): Needs to be benched.
 	fn on_offence(
-		offenders: &[OffenceDetails<
-			T::AccountId,
-			pallet_session::historical::IdentificationTuple<T>,
-		>],
+		offenders: &[OffenceDetails<T::AccountId, historical::IdentificationTuple<T>>],
 		slash_fractions: &[Perbill],
 		slash_session: SessionIndex,
 	) -> Weight {
@@ -1810,11 +1817,20 @@ where
 			slash_session,
 		);
 
+		// todo(ank4n): Needs to be properly benched.
+		let mut consumed_weight = Weight::zero();
+		let mut add_db_reads_writes = |reads, writes| {
+			consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
+		};
+
 		// Find the era to which offence belongs.
+		add_db_reads_writes(1, 0);
 		let Some(active_era) = ActiveEra::<T>::get() else {
 			log!(warn, "🦹 on_offence: no active era; ignoring offence");
-			return Weight::default();
+			return consumed_weight
 		};
+
+		add_db_reads_writes(1, 0);
 		let active_era_start_session =
 			ErasStartSessionIndex::<T>::get(active_era.index).unwrap_or(0);
 
@@ -1823,6 +1839,7 @@ where
 		let offence_era = if slash_session >= active_era_start_session {
 			active_era.index
 		} else {
+			add_db_reads_writes(1, 0);
 			match BondedEras::<T>::get()
 				.iter()
 				// Reverse because it's more likely to find reports from recent eras.
@@ -1840,6 +1857,7 @@ where
 			}
 		};
 
+		add_db_reads_writes(1, 0);
 		let invulnerables = Invulnerables::<T>::get();
 
 		for (details, slash_fraction) in offenders.iter().zip(slash_fractions) {
@@ -1850,6 +1868,7 @@ where
 				continue
 			}
 
+			add_db_reads_writes(1, 0);
 			let Some(exposure_overview) = <ErasStakersOverview<T>>::get(&offence_era, validator)
 			else {
 				// defensive: this implies offence is for a discarded era, and should already be
@@ -1863,20 +1882,29 @@ where
 				continue;
 			};
 
-			Self::deposit_event(Event::<T>::SlashReported {
+			Self::deposit_event(Event::<T>::OffenceReported {
 				validator: validator.clone(),
 				fraction: *slash_fraction,
-				slash_era: offence_era,
+				offence_era,
 			});
 
-			// add offending validator to the set of offenders.
-			slashing::add_offending_validator::<T>(validator, *slash_fraction, offence_era);
-
+			if offence_era == active_era.index {
+				// offence is in the current active era. Report it to session to maybe disable the
+				// validator.
+				add_db_reads_writes(2, 2);
+				T::SessionInterface::report_offence(
+					validator.clone(),
+					OffenceSeverity(*slash_fraction),
+				);
+			}
+			add_db_reads_writes(1, 0);
 			let prior_slash_fraction = ValidatorSlashInEra::<T>::get(offence_era, validator)
 				.map_or(Zero::zero(), |(f, _)| f);
 
+			add_db_reads_writes(1, 0);
 			if let Some(existing) = OffenceQueue::<T>::get(offence_era, validator) {
 				if slash_fraction.deconstruct() > existing.slash_fraction.deconstruct() {
+					add_db_reads_writes(0, 2);
 					OffenceQueue::<T>::insert(
 						offence_era,
 						validator,
@@ -1888,6 +1916,13 @@ where
 						},
 					);
 
+					// update the slash fraction in the `ValidatorSlashInEra` storage.
+					ValidatorSlashInEra::<T>::insert(
+						offence_era,
+						validator,
+						(slash_fraction, exposure_overview.own),
+					);
+
 					log!(
 						debug,
 						"🦹 updated slash for {:?}: {:?} (prior: {:?})",
@@ -1905,6 +1940,7 @@ where
 					);
 				}
 			} else if slash_fraction.deconstruct() > prior_slash_fraction.deconstruct() {
+				add_db_reads_writes(0, 3);
 				ValidatorSlashInEra::<T>::insert(
 					offence_era,
 					validator,
@@ -1917,8 +1953,8 @@ where
 					OffenceRecord {
 						reporter: details.reporters.first().cloned(),
 						reported_era: active_era.index,
-						offence_era,
-						// there are cases of validator with no exposure, so we default to 1.
+						// there are cases of validators with no exposure, hence 0 pages, so we
+						// saturate to avoid underflow.
 						exposure_page: exposure_overview.page_count.saturating_sub(1),
 						slash_fraction: *slash_fraction,
 						prior_slash_fraction,
@@ -2378,8 +2414,7 @@ impl<T: Config> Pallet<T> {
 		Self::check_payees()?;
 		Self::check_nominators()?;
 		Self::check_paged_exposures()?;
-		Self::check_count()?;
-		Self::ensure_disabled_validators_sorted()
+		Self::check_count()
 	}
 
 	/// Test invariants of:
@@ -2391,6 +2426,7 @@ impl<T: Config> Pallet<T> {
 	///
 	/// -- SHOULD ONLY BE CALLED AT THE END OF A GIVEN BLOCK.
 	pub fn ensure_snapshot_metadata_state(now: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
+		use sp_runtime::traits::One;
 		let next_election = Self::next_election_prediction(now);
 		let pages = Self::election_pages().saturated_into::<BlockNumberFor<T>>();
 		let election_prep_start = next_election - pages;
@@ -2711,6 +2747,7 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
+	/* todo(ank4n): move to session try runtime
 	// Sorted by index
 	fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> {
 		ensure!(
@@ -2719,4 +2756,6 @@ impl<T: Config> Pallet<T> {
 		);
 		Ok(())
 	}
+
+	 */
 }
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 2635f8cab92eedffc1c6584ab71aa348c8bdc3d8..6bd52e2bce166befd6fd20340a06a83b36342a56 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -47,7 +47,6 @@ use sp_runtime::{
 };
 
 use sp_staking::{
-	offence::OffenceSeverity,
 	EraIndex, Page, SessionIndex,
 	StakingAccount::{self, Controller, Stash},
 	StakingInterface,
@@ -58,11 +57,10 @@ mod impls;
 pub use impls::*;
 
 use crate::{
-	asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf,
-	DisablingStrategy, EraPayout, EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState,
-	MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf,
-	RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
-	ValidatorPrefs,
+	asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
+	EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf,
+	NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination,
+	SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
 };
 
 // The speculative number of spans are used as an input of the weight annotation of
@@ -78,14 +76,14 @@ pub mod pallet {
 	use frame_election_provider_support::{ElectionDataProvider, PageIndex};
 
 	/// The in-code storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(16);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(17);
 
 	#[pallet::pallet]
 	#[pallet::storage_version(STORAGE_VERSION)]
 	pub struct Pallet<T>(_);
 
 	/// Possible operations on the configuration values of this pallet.
-	#[derive(TypeInfo, Debug, Clone, Encode, Decode, PartialEq)]
+	#[derive(TypeInfo, Debug, Clone, Encode, Decode, DecodeWithMemTracking, PartialEq)]
 	pub enum ConfigOp<T: Default + Codec> {
 		/// Don't change.
 		Noop,
@@ -325,10 +323,6 @@ pub mod pallet {
 		#[pallet::no_default_bounds]
 		type EventListeners: sp_staking::OnStakingUpdate<Self::AccountId, BalanceOf<Self>>;
 
-		/// `DisablingStragegy` controls how validators are disabled
-		#[pallet::no_default_bounds]
-		type DisablingStrategy: DisablingStrategy<Self>;
-
 		/// Maximum number of invulnerable validators.
 		#[pallet::constant]
 		type MaxInvulnerables: Get<u32>;
@@ -396,7 +390,6 @@ pub mod pallet {
 			type MaxInvulnerables = ConstU32<20>;
 			type MaxDisabledValidators = ConstU32<100>;
 			type EventListeners = ();
-			type DisablingStrategy = crate::UpToLimitDisablingStrategy;
 			#[cfg(feature = "std")]
 			type BenchmarkingConfig = crate::TestBenchmarkingConfig;
 			type WeightInfo = ();
@@ -697,13 +690,13 @@ pub mod pallet {
 
 	/// All unapplied slashes that are queued for later.
 	#[pallet::storage]
-	#[pallet::unbounded]
 	pub type UnappliedSlashes<T: Config> = StorageDoubleMap<
 		_,
 		Twox64Concat,
 		EraIndex,
 		Twox64Concat,
-		(T::AccountId, Perbill, u32), // Second key: (Validator, slash_fraction, Page Index)
+		// Unique key for unapplied slashes: (validator, slash fraction, page index).
+		(T::AccountId, Perbill, u32),
 		UnappliedSlash<T>,
 		OptionQuery,
 	>;
@@ -757,20 +750,6 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type CurrentPlannedSession<T> = StorageValue<_, SessionIndex, ValueQuery>;
 
-	/// Indices of validators that have offended in the active era. The offenders are disabled for a
-	/// whole era. For this reason they are kept here - only staking pallet knows about eras. The
-	/// implementor of [`DisablingStrategy`] defines if a validator should be disabled which
-	/// implicitly means that the implementor also controls the max number of disabled validators.
-	///
-	/// The vec is always kept sorted based on the u32 index so that we can find whether a given
-	/// validator has previously offended using binary search.
-	///
-	/// Additionally, each disabled validator is associated with an `OffenceSeverity` which
-	/// represents how severe is the offence that got the validator disabled.
-	#[pallet::storage]
-	pub type DisabledValidators<T: Config> =
-		StorageValue<_, BoundedVec<(u32, OffenceSeverity), T::MaxDisabledValidators>, ValueQuery>;
-
 	/// The threshold for when users can start calling `chill_other` for other validators /
 	/// nominators. The threshold is compared to the actual number of validators / nominators
 	/// (`CountFor*`) in the system compared to the configured max (`Max*Count`).
@@ -988,13 +967,6 @@ pub mod pallet {
 			staker: T::AccountId,
 			amount: BalanceOf<T>,
 		},
-		/// A slash for the given validator, for the given percentage of their stake, at the given
-		/// era as been reported.
-		SlashReported {
-			validator: T::AccountId,
-			fraction: Perbill,
-			slash_era: EraIndex,
-		},
 		/// An old slashing report from a prior era was discarded because it could
 		/// not be processed.
 		OldSlashingReportDiscarded {
@@ -1059,14 +1031,6 @@ pub mod pallet {
 		ControllerBatchDeprecated {
 			failures: u32,
 		},
-		/// Validator has been disabled.
-		ValidatorDisabled {
-			stash: T::AccountId,
-		},
-		/// Validator has been re-enabled.
-		ValidatorReenabled {
-			stash: T::AccountId,
-		},
 		/// Staking balance migrated from locks to holds, with any balance that could not be held
 		/// is force withdrawn.
 		CurrencyMigrated {
@@ -1086,6 +1050,26 @@ pub mod pallet {
 			page: PageIndex,
 			result: Result<u32, u32>,
 		},
+		/// An offence for the given validator, for the given percentage of their stake, at the
+		/// given era has been reported.
+		OffenceReported {
+			offence_era: EraIndex,
+			validator: T::AccountId,
+			fraction: Perbill,
+		},
+		/// An offence has been processed and the corresponding slash has been computed.
+		SlashComputed {
+			offence_era: EraIndex,
+			slash_era: EraIndex,
+			offender: T::AccountId,
+			page: u32,
+		},
+		/// An unapplied slash has been cancelled.
+		SlashCancelled {
+			slash_era: EraIndex,
+			slash_key: (T::AccountId, Perbill, u32),
+			payout: BalanceOf<T>,
+		},
 	}
 
 	#[pallet::error]
@@ -1103,8 +1087,8 @@ pub mod pallet {
 		EmptyTargets,
 		/// Duplicate index.
 		DuplicateIndex,
-		/// Slash record index out of bounds.
-		InvalidSlashIndex,
+		/// Slash record not found.
+		InvalidSlashRecord,
 		/// Cannot have a validator or nominator role, with value less than the minimum defined by
 		/// governance (see `MinValidatorBond` and `MinNominatorBond`). If unbonding is the
 		/// intention, `chill` first to remove one's role as validator/nominator.
@@ -1119,8 +1103,6 @@ pub mod pallet {
 		InvalidEraToReward,
 		/// Invalid number of nominations.
 		InvalidNumberOfNominations,
-		/// Items are not sorted and unique.
-		NotSortedAndUnique,
 		/// Rewards for this era have already been claimed for this validator.
 		AlreadyClaimed,
 		/// No nominators exist on this page.
@@ -1161,6 +1143,8 @@ pub mod pallet {
 		CannotReapStash,
 		/// The stake of this account is already migrated to `Fungible` holds.
 		AlreadyMigrated,
+		/// Era not yet started.
+		EraNotStarted,
 	}
 
 	#[pallet::hooks]
@@ -1169,6 +1153,21 @@ pub mod pallet {
 		/// that the `ElectableStashes` has been populated with all validators from all pages at
 		/// the time of the election.
 		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
+			// todo(ank4n): Hacky bench. Do it properly.
+			let mut consumed_weight = slashing::process_offence::<T>();
+
+			consumed_weight.saturating_accrue(T::DbWeight::get().reads(1));
+			if let Some(active_era) = ActiveEra::<T>::get() {
+				let max_slash_page_size = T::MaxExposurePageSize::get();
+				consumed_weight.saturating_accrue(
+					T::DbWeight::get().reads_writes(
+						3 * max_slash_page_size as u64,
+						3 * max_slash_page_size as u64,
+					),
+				);
+				Self::apply_unapplied_slashes(active_era.index);
+			}
+
 			let pages = Self::election_pages();
 
 			// election ongoing, fetch the next page.
@@ -1196,7 +1195,9 @@ pub mod pallet {
 				}
 			};
 
-			T::WeightInfo::on_initialize_noop().saturating_add(inner_weight)
+			consumed_weight.saturating_accrue(inner_weight);
+
+			consumed_weight
 		}
 
 		fn on_finalize(_n: BlockNumberFor<T>) {
@@ -1959,34 +1960,35 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Cancel enactment of a deferred slash.
+		/// Cancels scheduled slashes for a given era before they are applied.
 		///
-		/// Can be called by the `T::AdminOrigin`.
+		/// This function allows `T::AdminOrigin` to selectively remove pending slashes from
+		/// the `UnappliedSlashes` storage, preventing their enactment.
 		///
-		/// Parameters: era and indices of the slashes for that era to kill.
+		/// ## Parameters
+		/// - `era`: The staking era for which slashes were deferred.
+		/// - `slash_keys`: A list of slash keys identifying the slashes to remove. This is a tuple
+		/// of `(stash, slash_fraction, page_index)`.
 		#[pallet::call_index(17)]
-		#[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))]
+		#[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_keys.len() as u32))]
 		pub fn cancel_deferred_slash(
 			origin: OriginFor<T>,
-			_era: EraIndex,
-			slash_indices: Vec<u32>,
+			era: EraIndex,
+			slash_keys: Vec<(T::AccountId, Perbill, u32)>,
 		) -> DispatchResult {
 			T::AdminOrigin::ensure_origin(origin)?;
-
-			ensure!(!slash_indices.is_empty(), Error::<T>::EmptyTargets);
-			ensure!(is_sorted_and_unique(&slash_indices), Error::<T>::NotSortedAndUnique);
-
-			// todo(ank4n): Refactor this to take vec of (stash, page), and kill `UnappliedSlashes`.
-			// let mut unapplied = UnappliedSlashes::<T>::get(&era);
-			// let last_item = slash_indices[slash_indices.len() - 1];
-			// ensure!((last_item as usize) < unapplied.len(), Error::<T>::InvalidSlashIndex);
-			//
-			// for (removed, index) in slash_indices.into_iter().enumerate() {
-			// 	let index = (index as usize) - removed;
-			// 	unapplied.remove(index);
-			// }
-			//
-			// UnappliedSlashes::<T>::insert(&era, &unapplied);
+			ensure!(!slash_keys.is_empty(), Error::<T>::EmptyTargets);
+
+			// Remove the unapplied slashes.
+			slash_keys.into_iter().for_each(|i| {
+				UnappliedSlashes::<T>::take(&era, &i).map(|unapplied_slash| {
+					Self::deposit_event(Event::<T>::SlashCancelled {
+						slash_era: era,
+						slash_key: i,
+						payout: unapplied_slash.payout,
+					});
+				});
+			});
 			Ok(())
 		}
 
@@ -2547,10 +2549,44 @@ pub mod pallet {
 			// Refund the transaction fee if successful.
 			Ok(Pays::No.into())
 		}
-	}
-}
 
-/// Check that list is sorted and has no duplicates.
-fn is_sorted_and_unique(list: &[u32]) -> bool {
-	list.windows(2).all(|w| w[0] < w[1])
+		/// Manually applies a deferred slash for a given era.
+		///
+		/// Normally, slashes are automatically applied shortly after the start of the `slash_era`.
+		/// This function exists as a **fallback mechanism** in case slashes were not applied due to
+		/// unexpected reasons. It allows anyone to manually apply an unapplied slash.
+		///
+		/// ## Parameters
+		/// - `slash_era`: The staking era in which the slash was originally scheduled.
+		/// - `slash_key`: A unique identifier for the slash, represented as a tuple:
+		///   - `stash`: The stash account of the validator being slashed.
+		///   - `slash_fraction`: The fraction of the stake that was slashed.
+		///   - `page_index`: The index of the exposure page being processed.
+		///
+		/// ## Behavior
+		/// - The function is **permissionless**—anyone can call it.
+		/// - The `slash_era` **must be the current era or a past era**. If it is in the future, the
+		///   call fails with `EraNotStarted`.
+		/// - The fee is waived if the slash is successfully applied.
+		///
+		/// ## TODO: Future Improvement
+		/// - Implement an **off-chain worker (OCW) task** to automatically apply slashes when there
+		///   is unused block space, improving efficiency.
+		#[pallet::call_index(31)]
+		#[pallet::weight(T::WeightInfo::apply_slash())]
+		pub fn apply_slash(
+			origin: OriginFor<T>,
+			slash_era: EraIndex,
+			slash_key: (T::AccountId, Perbill, u32),
+		) -> DispatchResultWithPostInfo {
+			let _ = ensure_signed(origin)?;
+			let active_era = ActiveEra::<T>::get().map(|a| a.index).unwrap_or_default();
+			ensure!(slash_era <= active_era, Error::<T>::EraNotStarted);
+			let unapplied_slash = UnappliedSlashes::<T>::take(&slash_era, &slash_key)
+				.ok_or(Error::<T>::InvalidSlashRecord)?;
+			slashing::apply_slash::<T>(unapplied_slash, slash_era);
+
+			Ok(Pays::No.into())
+		}
+	}
 }
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index 451294dc46fc6dc9a9b052c17f2fc0f88c3e4131..6ec252048528f9c8ce9c15a6bc15b587bc0bbfb4 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -50,10 +50,9 @@
 //! Based on research at <https://research.web3.foundation/en/latest/polkadot/slashing/npos.html>
 
 use crate::{
-	asset, log, BalanceOf, Config, DisabledValidators, DisablingStrategy, EraInfo, Error,
-	NegativeImbalanceOf, NominatorSlashInEra, OffenceQueue, OffenceQueueEras, PagedExposure,
-	Pallet, Perbill, ProcessingOffence, SessionInterface, SlashRewardFraction, SpanSlash,
-	UnappliedSlash, UnappliedSlashes, ValidatorSlashInEra,
+	asset, BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure,
+	NegativeImbalanceOf, NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash,
+	UnappliedSlash, ValidatorSlashInEra,
 };
 use alloc::vec::Vec;
 use codec::{Decode, Encode, MaxEncodedLen};
@@ -64,9 +63,9 @@ use frame_support::{
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{Saturating, Zero},
-	BoundedVec, DispatchResult, RuntimeDebug,
+	DispatchResult, RuntimeDebug,
 };
-use sp_staking::{offence::OffenceSeverity, EraIndex, StakingInterface};
+use sp_staking::{EraIndex, StakingInterface};
 
 /// The proportion of the slashing reward to be paid out on the first slashing detection.
 /// This is f_1 in the paper.
@@ -210,16 +209,6 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> {
 	pub(crate) stash: &'a T::AccountId,
 	/// The proportion of the slash.
 	pub(crate) slash: Perbill,
-	/// The prior slash proportion of the validator if the validator has been reported multiple
-	/// times in the same era, and a new greater slash replaces the old one.
-	/// Invariant: slash > prior_slash
-	pub(crate) prior_slash: Perbill,
-	/// Determines whether the validator should be slashed.
-	///
-	/// Since a validator's total exposure can span multiple pages, we ensure the validator
-	/// is slashed only **once** per offence. This flag allows the caller to specify
-	/// whether the validator should be included in the slashing process.
-	pub(crate) should_slash_validator: bool,
 	/// The exposure of the stash and all nominators.
 	pub(crate) exposure: &'a PagedExposure<T::AccountId, BalanceOf<T>>,
 	/// The era where the offence occurred.
@@ -233,223 +222,69 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> {
 	pub(crate) reward_proportion: Perbill,
 }
 
-/// Represents an offence record within the staking system, capturing details about a slashing
-/// event.
-#[derive(Clone, Encode, Decode, TypeInfo, MaxEncodedLen)]
-pub struct OffenceRecord<AccountId> {
-	/// The account ID of the entity that reported the offence.
-	pub reporter: Option<AccountId>,
-
-	/// Era at which the offence was reported.
-	pub reported_era: EraIndex,
-
-	/// Era at which the offence actually occurred.
-	pub offence_era: EraIndex,
-
-	/// The specific page of the validator's exposure currently being processed.
-	///
-	/// Since a validator's total exposure can span multiple pages, this field serves as a pointer
-	/// to the current page being evaluated. The processing order starts from the last page
-	/// and moves backward, decrementing this value with each processed page.
-	///
-	/// This ensures that all pages are systematically handled, and it helps track when
-	/// the entire exposure has been processed.
-	pub exposure_page: u32,
-
-	/// The fraction of the validator's stake to be slashed for this offence.
-	pub slash_fraction: Perbill,
-
-	/// The previous slash fraction of the validator's stake before being updated.
-	/// If a new, higher slash fraction is reported, this field stores the prior fraction
-	/// that was overwritten. This helps in tracking changes in slashes across multiple reports for
-	/// the same era.
-	pub prior_slash_fraction: Perbill,
-}
-
-/// Loads next offence in the processing offence and returns the offense record to be processed.
+/// Computes a slash of a validator and nominators. It returns an unapplied
+/// record to be applied at some later point. Slashing metadata is updated in storage,
+/// since unapplied records are only rarely intended to be dropped.
 ///
-/// Note: this can mutate the following storage
-/// - `ProcessingOffence`
-/// - `OffenceQueue`
-/// - `OffenceQueueEras`
-fn next_offence<T: Config>() -> Option<(EraIndex, T::AccountId, OffenceRecord<T::AccountId>)> {
-	let processing_offence = ProcessingOffence::<T>::get();
-
-	if let Some((offence_era, offender, offence_record)) = processing_offence {
-		// If the exposure page is 0, then the offence has been processed.
-		if offence_record.exposure_page == 0 {
-			ProcessingOffence::<T>::kill();
-			return Some((offence_era, offender, offence_record))
-		}
-
-		// Update the next page.
-		ProcessingOffence::<T>::put((
-			offence_era,
-			&offender,
-			OffenceRecord {
-				// decrement the page index.
-				exposure_page: offence_record.exposure_page.defensive_saturating_sub(1),
-				..offence_record.clone()
-			},
-		));
+/// The pending slash record returned does not have initialized reporters. Those have
+/// to be set at a higher level, if any.
+pub(crate) fn compute_slash<T: Config>(
+	params: SlashParams<T>,
+) -> Option<UnappliedSlash<T::AccountId, BalanceOf<T>>> {
+	let mut reward_payout = Zero::zero();
+	let mut val_slashed = Zero::zero();
 
-		return Some((offence_era, offender, offence_record))
+	// is the slash amount here a maximum for the era?
+	let own_slash = params.slash * params.exposure.own;
+	if params.slash * params.exposure.total == Zero::zero() {
+		// kick out the validator even if they won't be slashed,
+		// as long as the misbehavior is from their most recent slashing span.
+		kick_out_if_recent::<T>(params);
+		return None
 	}
 
-	// Nothing in processing offence. Try to enqueue the next offence.
-	let Some(mut eras) = OffenceQueueEras::<T>::get() else { return None };
-	let Some(&oldest_era) = eras.first() else { return None };
-
-	let mut offence_iter = OffenceQueue::<T>::iter_prefix(oldest_era);
-	let next_offence = offence_iter.next();
-
-	if let Some((ref validator, ref offence_record)) = next_offence {
-		// Update the processing offence if the offence is multi-page.
-		if offence_record.exposure_page > 0 {
-			// update processing offence with the next page.
-			ProcessingOffence::<T>::put((
-				oldest_era,
-				validator.clone(),
-				OffenceRecord {
-					exposure_page: offence_record.exposure_page.defensive_saturating_sub(1),
-					..offence_record.clone()
-				},
-			));
-		}
+	let prior_slash_p = ValidatorSlashInEra::<T>::get(&params.slash_era, params.stash)
+		.map_or(Zero::zero(), |(prior_slash_proportion, _)| prior_slash_proportion);
 
-		// Remove from `OffenceQueue`
-		OffenceQueue::<T>::remove(oldest_era, &validator);
-	}
-
-	// If there are no offences left for the era, remove the era from `OffenceQueueEras`.
-	if offence_iter.next().is_none() {
-		eras.remove(0); // Remove the oldest era
-		OffenceQueueEras::<T>::put(eras);
+	// compare slash proportions rather than slash values to avoid issues due to rounding
+	// error.
+	if params.slash.deconstruct() > prior_slash_p.deconstruct() {
+		ValidatorSlashInEra::<T>::insert(
+			&params.slash_era,
+			params.stash,
+			&(params.slash, own_slash),
+		);
+	} else {
+		// we slash based on the max in era - this new event is not the max,
+		// so neither the validator or any nominators will need an update.
+		//
+		// this does lead to a divergence of our system from the paper, which
+		// pays out some reward even if the latest report is not max-in-era.
+		// we opt to avoid the nominator lookups and edits and leave more rewards
+		// for more drastic misbehavior.
+		return None
 	}
 
-	next_offence.map(|(v, o)| (oldest_era, v, o))
-}
-
-/// Infallible function to process an offence.
-pub(crate) fn process_offence<T: Config>() {
-	let Some((offence_era, offender, offence_record)) = next_offence::<T>() else {
-		return;
-	};
-
-	log!(
-		debug,
-		"🦹 Processing offence for {:?} in era {:?} with slash fraction {:?}",
-		offender,
-		offence_era,
-		offence_record.slash_fraction,
-	);
-
-	let reward_proportion = SlashRewardFraction::<T>::get();
-	let Some(exposure) =
-		EraInfo::<T>::get_paged_exposure(offence_era, &offender, offence_record.exposure_page)
-	else {
-		// this can only happen if the offence was valid at the time of reporting but became too old
-		// at the time of computing and should be discarded.
-		return
-	};
-
-	let slash_page = offence_record.exposure_page;
-	// The validator is slashed only once per offence, specifically along with the last page of its
-	// exposure.
-	let exposure_pages = exposure.exposure_metadata.page_count;
-	let should_slash_validator =
-		exposure_pages == 0 || slash_page == exposure.exposure_metadata.page_count - 1;
-
-	let slash_defer_duration = T::SlashDeferDuration::get();
-	let slash_era = offence_era.saturating_add(slash_defer_duration);
-	let window_start = offence_record.reported_era.saturating_sub(T::BondingDuration::get());
-
-	let Some(mut unapplied) = compute_slash::<T>(SlashParams {
-		stash: &offender,
-		slash: offence_record.slash_fraction,
-		prior_slash: offence_record.prior_slash_fraction,
-		should_slash_validator,
-		exposure: &exposure,
-		slash_era: offence_era,
-		window_start,
-		now: offence_record.reported_era,
-		reward_proportion,
-	}) else {
-		log!(
-			debug,
-			"🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is discarded, as could not compute slash",
-			offence_record.slash_fraction,
-			offence_era,
-			offence_record.reported_era,
+	// apply slash to validator.
+	{
+		let mut spans = fetch_spans::<T>(
+			params.stash,
+			params.window_start,
+			&mut reward_payout,
+			&mut val_slashed,
+			params.reward_proportion,
 		);
-		// No slash to apply. Discard.
-		return
-	};
 
-	<Pallet<T>>::deposit_event(super::Event::<T>::SlashComputed {
-		offence_era,
-		slash_era,
-		offender: offender.clone(),
-		page: slash_page,
-	});
-
-	log!(
-		debug,
-		"🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is computed",
-		offence_record.slash_fraction,
-		offence_era,
-		offence_record.reported_era,
-	);
+		let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash);
 
-	// add the reporter to the unapplied slash.
-	unapplied.reporter = offence_record.reporter;
-
-	if slash_defer_duration == 0 {
-		// Apply right away.
-		log!(
-			debug,
-			"🦹 applying slash instantly of {:?}% happened in {:?} (reported in {:?}) to {:?}",
-			offence_record.slash_fraction,
-			offence_era,
-			offence_record.reported_era,
-			offender,
-		);
-		apply_slash::<T>(unapplied, offence_era);
-	} else {
-		// Historical Note: Previously, with BondingDuration = 28 and SlashDeferDuration = 27,
-		// slashes were applied at the start of the 28th era from `offence_era`.
-		// However, with paged slashing, applying slashes now takes multiple blocks.
-		// To account for this delay, slashes are now applied at the start of the 27th era from
-		// `offence_era`.
-		log!(
-			debug,
-			"🦹 deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}",
-			offence_record.slash_fraction,
-			offence_era,
-			offence_record.reported_era,
-			slash_era,
-		);
-		UnappliedSlashes::<T>::insert(
-			slash_era,
-			(offender, offence_record.slash_fraction, slash_page),
-			unapplied,
-		);
+		if target_span == Some(spans.span_index()) {
+			// misbehavior occurred within the current slashing span - end current span.
+			// Check <https://github.com/paritytech/polkadot-sdk/issues/2650> for details.
+			spans.end_span(params.now);
+		}
 	}
-}
 
-/// Computes a slash of a validator and nominators. It returns an unapplied
-/// record to be applied at some later point. Slashing metadata is updated in storage,
-/// since unapplied records are only rarely intended to be dropped.
-///
-/// The pending slash record returned does not have initialized reporters. Those have
-/// to be set at a higher level, if any.
-///
-/// If `nomintors_only` is set to `true`, only the nominator slashes will be computed.
-pub(crate) fn compute_slash<T: Config>(params: SlashParams<T>) -> Option<UnappliedSlash<T>> {
-	let (val_slashed, mut reward_payout) = params
-		.should_slash_validator
-		.then(|| slash_validator::<T>(params.clone()))
-		.unwrap_or((Zero::zero(), Zero::zero()));
+	add_offending_validator::<T>(&params);
 
 	let mut nominators_slashed = Vec::new();
 	let (nom_slashed, nom_reward_payout) =
@@ -459,8 +294,8 @@ pub(crate) fn compute_slash<T: Config>(params: SlashParams<T>) -> Option<Unappli
 	(nom_slashed + val_slashed > Zero::zero()).then_some(UnappliedSlash {
 		validator: params.stash.clone(),
 		own: val_slashed,
-		others: BoundedVec::truncate_from(nominators_slashed),
-		reporter: None,
+		others: nominators_slashed,
+		reporters: Vec::new(),
 		payout: reward_payout,
 	})
 }
@@ -487,14 +322,11 @@ fn kick_out_if_recent<T: Config>(params: SlashParams<T>) {
 
 /// Inform the [`DisablingStrategy`] implementation about the new offender and disable the list of
 /// validators provided by [`decision`].
-pub(crate) fn add_offending_validator<T: Config>(
-	stash: &T::AccountId,
-	slash: Perbill,
-	offence_era: EraIndex,
-) {
+fn add_offending_validator<T: Config>(params: &SlashParams<T>) {
 	DisabledValidators::<T>::mutate(|disabled| {
-		let new_severity = OffenceSeverity(slash);
-		let decision = T::DisablingStrategy::decision(stash, new_severity, offence_era, &disabled);
+		let new_severity = OffenceSeverity(params.slash);
+		let decision =
+			T::DisablingStrategy::decision(params.stash, new_severity, params.slash_era, &disabled);
 
 		if let Some(offender_idx) = decision.disable {
 			// Check if the offender is already disabled
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index 26e8666f454b0bc040a59112681997a936a8253c..84df26389f702c163e83f72f916b6937b0fef3fb 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -34,9 +34,14 @@ use frame_support::{
 	},
 	BoundedVec,
 };
-
 use mock::*;
 use pallet_balances::Error as BalancesError;
+use pallet_session::{
+	disabling::{
+		DisablingStrategy, UpToLimitDisablingStrategy, UpToLimitWithReEnablingDisablingStrategy,
+	},
+	Event as SessionEvent,
+};
 use sp_runtime::{
 	assert_eq_error_rate, bounded_vec,
 	traits::{BadOrigin, Dispatchable},
@@ -44,7 +49,7 @@ use sp_runtime::{
 };
 use sp_staking::{
 	offence::{OffenceDetails, OnOffenceHandler},
-	SessionIndex,
+	SessionIndex, StakingInterface,
 };
 use substrate_test_utils::assert_eq_uvec;
 
@@ -748,7 +753,7 @@ fn nominators_also_get_slashed_pro_rata() {
 			let exposed_nominator = initial_exposure.others.first().unwrap().value;
 
 			// 11 goes offline
-			on_offence_now(&[offence_from(11, None)], &[slash_percent]);
+			on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 			// both stakes must have been decreased.
 			assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake);
@@ -2445,7 +2450,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		);
 
 		// Check slashing
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), stake - 1);
 		assert_eq!(asset::stakeable_balance::<Test>(&2), 1);
@@ -2538,7 +2543,7 @@ fn era_is_always_same_length() {
 #[test]
 fn offence_doesnt_force_new_era() {
 	ExtBuilder::default().build_and_execute(|| {
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true);
 
 		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 	});
@@ -2550,7 +2555,7 @@ fn offence_ensures_new_era_without_clobbering() {
 		assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root()));
 		assert_eq!(ForceEra::<Test>::get(), Forcing::ForceAlways);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true);
 
 		assert_eq!(ForceEra::<Test>::get(), Forcing::ForceAlways);
 	});
@@ -2568,7 +2573,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() {
 			assert!(Session::validators().contains(&11));
 			assert!(<Validators<Test>>::contains_key(11));
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 			assert!(is_disabled(11));
@@ -2588,7 +2593,7 @@ fn slashing_performed_according_exposure() {
 		assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000);
 
 		// Handle an offence with a historical exposure.
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 		// The stash account should be slashed for 250 (50% of 500).
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000 / 2);
@@ -2606,7 +2611,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 			assert!(<Validators<Test>>::contains_key(11));
 			assert!(Session::validators().contains(&11));
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 			assert!(is_disabled(11));
@@ -2622,7 +2627,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 			mock::start_active_era(3);
 
 			// an offence committed in era 1 is reported in era 3
-			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1, true);
 
 			// the validator doesn't get disabled for an old offence
 			assert!(Validators::<Test>::iter().any(|(stash, _)| stash == 11));
@@ -2636,6 +2641,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 				// NOTE: A 100% slash here would clean up the account, causing de-registration.
 				&[Perbill::from_percent(95)],
 				1,
+				true,
 			);
 
 			// the validator doesn't get disabled again
@@ -2657,8 +2663,9 @@ fn only_first_reporter_receive_the_slice() {
 		assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance);
 
 		on_offence_now(
-			&[OffenceDetails { offender: (11, 11), reporters: vec![1, 2] }],
+			&[OffenceDetails { offender: (11, ()), reporters: vec![1, 2] }],
 			&[Perbill::from_percent(50)],
+			true,
 		);
 
 		// F1 * (reward_proportion * slash - 0)
@@ -2679,14 +2686,14 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 
 		assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance);
 
-		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(20)]);
+		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(20)], true);
 
 		// F1 * (reward_proportion * slash - 0)
 		// 50% * (10% * initial_balance * 20%)
 		let reward = (initial_balance / 5) / 20;
 		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward);
 
-		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(50)]);
+		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(50)], true);
 
 		let prior_payout = reward;
 
@@ -2716,6 +2723,7 @@ fn invulnerables_are_not_slashed() {
 		on_offence_now(
 			&[offence_from(11, None), offence_from(21, None)],
 			&[Perbill::from_percent(50), Perbill::from_percent(20)],
+			true,
 		);
 
 		// The validator 11 hasn't been slashed, but 21 has been.
@@ -2739,7 +2747,7 @@ fn dont_slash_if_fraction_is_zero() {
 	ExtBuilder::default().build_and_execute(|| {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 		// The validator hasn't been slashed. The new era is not forced.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
@@ -2754,18 +2762,18 @@ fn only_slash_for_max_in_era() {
 	ExtBuilder::default().build_and_execute(|| {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 		// The validator has been slashed and has been force-chilled.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 500);
 		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
 		// The validator has not been slashed additionally.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 500);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(60)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(60)], true);
 
 		// The validator got slashed 10% more.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 400);
@@ -2781,13 +2789,13 @@ fn garbage_collection_after_slashing() {
 		.build_and_execute(|| {
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 2000);
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 2000 - 200);
 			assert!(SlashingSpans::<Test>::get(&11).is_some());
 			assert_eq!(SpanSlash::<Test>::get(&(11, 0)).amount(), &200);
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 			// validator and nominator slash in era are garbage-collected by era change,
 			// so we don't test those here.
@@ -2863,7 +2871,7 @@ fn slashing_nominators_by_span_max() {
 		let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value;
 		let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2);
+		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
 
@@ -2882,7 +2890,7 @@ fn slashing_nominators_by_span_max() {
 		assert_eq!(get_span(101).iter().collect::<Vec<_>>(), expected_spans);
 
 		// second slash: higher era, higher value, same span.
-		on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(30)], 3);
+		on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(30)], 3, true);
 
 		// 11 was not further slashed, but 21 and 101 were.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
@@ -2896,7 +2904,7 @@ fn slashing_nominators_by_span_max() {
 
 		// third slash: in same era and on same validator as first, higher
 		// in-era value, but lower slash value than slash 2.
-		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(20)], 2);
+		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(20)], 2, true);
 
 		// 11 was further slashed, but 21 and 101 were not.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 800);
@@ -2923,7 +2931,7 @@ fn slashes_are_summed_across_spans() {
 
 		let get_span = |account| SlashingSpans::<Test>::get(&account).unwrap();
 
-		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]);
+		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
 		let expected_spans = vec![
 			slashing::SlashingSpan { index: 1, start: 4, length: None },
@@ -2940,7 +2948,7 @@ fn slashes_are_summed_across_spans() {
 
 		assert_eq!(Staking::slashable_balance_of(&21), 900);
 
-		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]);
+		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
 		let expected_spans = vec![
 			slashing::SlashingSpan { index: 2, start: 5, length: None },
@@ -2969,7 +2977,7 @@ fn deferred_slashes_are_deferred() {
 		// only 1 page of exposure, so slashes will be applied in one block.
 		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 1);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 		// nominations are not removed regardless of the deferring.
 		assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -2985,7 +2993,7 @@ fn deferred_slashes_are_deferred() {
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
 				Event::SlashComputed { offence_era: 1, slash_era: 3, page: 0, .. },
 				Event::PagedElectionProceeded { page: 0, result: Ok(2) },
 				Event::StakersElected,
@@ -3006,7 +3014,7 @@ fn deferred_slashes_are_deferred() {
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				// era 3 elections
 				Event::PagedElectionProceeded { page: 0, result: Ok(2) },
 				Event::StakersElected,
 				Event::EraPaid { .. },
@@ -3024,9 +3032,6 @@ fn retroactive_deferred_slashes_two_eras_before() {
 		assert_eq!(BondingDuration::get(), 3);
 
 		mock::start_active_era(1);
-		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11);
-
-		mock::start_active_era(3);
 
 		assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
 
@@ -3035,12 +3040,17 @@ fn retroactive_deferred_slashes_two_eras_before() {
 			&[offence_from(11, None)],
 			&[Perbill::from_percent(10)],
 			1, // should be deferred for two eras, and applied at the beginning of era 3.
+			true,
 		);
 
+		mock::start_active_era(3);
+		// Slashes not applied yet. Will apply in the next block after era starts.
+		advance_blocks(1);
+
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
 				Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 },
 				..,
 				Event::Slashed { staker: 11, amount: 100 },
@@ -3055,9 +3065,6 @@ fn retroactive_deferred_slashes_one_before() {
 	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
 		assert_eq!(BondingDuration::get(), 3);
 
-		mock::start_active_era(1);
-		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11);
-
 		// unbond at slash era.
 		mock::start_active_era(2);
 		assert_ok!(Staking::chill(RuntimeOrigin::signed(11)));
@@ -3068,7 +3075,8 @@ fn retroactive_deferred_slashes_one_before() {
 		on_offence_in_era(
 			&[offence_from(11, None)],
 			&[Perbill::from_percent(10)],
-			2, // should be deferred for two eras, and applied at the beginning of era 4.
+			2, // should be deferred for two eras, and applied before the beginning of era 4.
+			true,
 		);
 
 		mock::start_active_era(4);
@@ -3080,7 +3088,7 @@ fn retroactive_deferred_slashes_one_before() {
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 2, .. },
+				Event::OffenceReported { validator: 11, offence_era: 2, .. },
 				Event::SlashComputed { offence_era: 2, slash_era: 4, offender: 11, page: 0 },
 				..,
 				Event::Slashed { staker: 11, amount: 100 },
@@ -3107,7 +3115,7 @@ fn staker_cannot_bail_deferred_slash() {
 		let exposure = Staking::eras_stakers(active_era(), &11);
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 		// now we chill
 		assert_ok!(Staking::chill(RuntimeOrigin::signed(101)));
@@ -3178,7 +3186,8 @@ fn remove_deferred() {
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
 		// deferred to start of era 3.
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+		let slash_fraction_one = Perbill::from_percent(10);
+		on_offence_now(&[offence_from(11, None)], &[slash_fraction_one], true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
@@ -3187,7 +3196,34 @@ fn remove_deferred() {
 
 		// reported later, but deferred to start of era 3 as well.
 		System::reset_events();
-		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(15)], 1);
+		let slash_fraction_two = Perbill::from_percent(15);
+		on_offence_in_era(&[offence_from(11, None)], &[slash_fraction_two], 1, true);
+
+		assert_eq!(
+			UnappliedSlashes::<Test>::iter_prefix(&3).collect::<Vec<_>>(),
+			vec![
+				(
+					(11, slash_fraction_one, 0),
+					UnappliedSlash {
+						validator: 11,
+						own: 100,
+						others: bounded_vec![(101, 12)],
+						reporter: None,
+						payout: 5
+					}
+				),
+				(
+					(11, slash_fraction_two, 0),
+					UnappliedSlash {
+						validator: 11,
+						own: 50,
+						others: bounded_vec![(101, 7)],
+						reporter: None,
+						payout: 6
+					}
+				),
+			]
+		);
 
 		// fails if empty
 		assert_noop!(
@@ -3195,9 +3231,13 @@ fn remove_deferred() {
 			Error::<Test>::EmptyTargets
 		);
 
-		// cancel one of them.
-		// fixme: this is not working
-		assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0]));
+		// cancel the slash with 10%.
+		assert_ok!(Staking::cancel_deferred_slash(
+			RuntimeOrigin::root(),
+			3,
+			vec![(11, slash_fraction_one, 0)]
+		));
+		assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&3).count(), 1);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
@@ -3215,16 +3255,21 @@ fn remove_deferred() {
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
 				Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 },
+				Event::SlashCancelled {
+					slash_era: 3,
+					slash_key: (11, fraction, 0),
+					payout: 5
+				},
 				..,
 				Event::Slashed { staker: 11, amount: 50 },
 				Event::Slashed { staker: 101, amount: 7 }
-			]
+			] if fraction == slash_fraction_one
 		));
 
 		let slash_10 = Perbill::from_percent(10);
-		let slash_15 = Perbill::from_percent(15);
+		let slash_15 = slash_fraction_two;
 		let initial_slash = slash_10 * nominated_value;
 
 		let total_slash = slash_15 * nominated_value;
@@ -3239,53 +3284,48 @@ fn remove_deferred() {
 #[test]
 #[ignore]
 fn remove_multi_deferred() {
-	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
-		mock::start_active_era(1);
-
-		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
+	ExtBuilder::default()
+		.slash_defer_duration(2)
+		.validator_count(4)
+		.set_status(41, StakerStatus::Validator)
+		.set_status(51, StakerStatus::Validator)
+		.build_and_execute(|| {
+			mock::start_active_era(1);
 
-		let exposure = Staking::eras_stakers(active_era(), &11);
-		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
+			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
+			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
-		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
-		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
-		// fixme: following are not validators. Exposure is ignored.
-		on_offence_now(&[offence_from(42, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(41, None)], &[Perbill::from_percent(25)], true);
 
-		on_offence_now(&[offence_from(69, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(51, None)], &[Perbill::from_percent(25)], true);
 
-		println!("{:?}", UnappliedSlashes::<Test>::iter_prefix(&3).collect::<Vec<_>>());
-		// assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&3).count(), 5);
+			// there are 5 slashes to be applied in era 3.
+			assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&3).count(), 5);
 
-		// fails if list is not sorted
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]),
-			Error::<Test>::NotSortedAndUnique
-		);
-		// fails if list is not unique
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]),
-			Error::<Test>::NotSortedAndUnique
-		);
-		// fails if bad index
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]),
-			Error::<Test>::InvalidSlashIndex
-		);
-
-		assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4]));
+			// lets cancel 3 of them.
+			assert_ok!(Staking::cancel_deferred_slash(
+				RuntimeOrigin::root(),
+				3,
+				vec![
+					(11, Perbill::from_percent(10), 0),
+					(11, Perbill::from_percent(25), 0),
+					(51, Perbill::from_percent(25), 0),
+				]
+			));
 
-		/* fixme
-		 let slashes = UnappliedSlashes::<Test>::get(&4);
-		assert_eq!(slashes.len(), 2);
-		assert_eq!(slashes[0].validator, 21);
-		assert_eq!(slashes[1].validator, 42);
-		*/
-	})
+			let slashes = UnappliedSlashes::<Test>::iter_prefix(&3).collect::<Vec<_>>();
+			assert_eq!(slashes.len(), 2);
+			// the first item in the remaining slashes belongs to validator 41.
+			assert_eq!(slashes[0].0, (41, Perbill::from_percent(25), 0));
+			// the second and last item in the remaining slashes belongs to validator 21.
+			assert_eq!(slashes[1].0, (21, Perbill::from_percent(10), 0));
+		})
 }
 
 #[test]
@@ -3314,7 +3354,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 			assert_eq!(exposure_11.total, 1000 + 125);
 			assert_eq!(exposure_21.total, 1000 + 375);
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 			assert_eq!(
 				staking_events_since_last_call(),
@@ -3322,18 +3362,22 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 100 },
 					Event::Slashed { staker: 101, amount: 12 },
 				]
 			);
 
+			assert!(matches!(
+				session_events().as_slice(),
+				&[.., SessionEvent::ValidatorDisabled { validator: 11 }]
+			));
+
 			// post-slash balance
 			let nominator_slash_amount_11 = 125 / 10;
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
@@ -3370,17 +3414,14 @@ fn non_slashable_offence_disables_validator() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
 			// offence with no slash associated
-			on_offence_now(&[offence_from(11, None)], &[Perbill::zero()]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::zero()], true);
 
 			// it does NOT affect the nominator.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
 
 			// offence that slashes 25% of the bond
-			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true);
 
 			// it DOES NOT affect the nominator.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3391,24 +3432,31 @@ fn non_slashable_offence_disables_validator() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(0),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(25),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 250 },
 					Event::Slashed { staker: 101, amount: 94 }
 				]
 			);
 
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+				]
+			));
+
 			// the offence for validator 11 wasn't slashable but it is disabled
 			assert!(is_disabled(11));
 			// validator 21 gets disabled too
@@ -3426,14 +3474,11 @@ fn slashing_independent_of_disabling_validator() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
 			let now = ActiveEra::<Test>::get().unwrap().index;
 
 			// --- Disable without a slash ---
 			// offence with no slash associated
-			on_offence_in_era(&[offence_from(11, None)], &[Perbill::zero()], now);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::zero()], now, true);
 
 			// nomination remains untouched.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3443,10 +3488,10 @@ fn slashing_independent_of_disabling_validator() {
 
 			// --- Slash without disabling ---
 			// offence that slashes 50% of the bond (setup for next slash)
-			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(50)], now);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(50)], now, true);
 
 			// offence that slashes 25% of the bond but does not disable
-			on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(25)], now);
+			on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(25)], now, true);
 
 			// nomination remains untouched.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3461,30 +3506,39 @@ fn slashing_independent_of_disabling_validator() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(5) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(0),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 500 },
 					Event::Slashed { staker: 101, amount: 62 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(25),
-						slash_era: 1
+						offence_era: 1
 					},
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 250 },
 					Event::Slashed { staker: 101, amount: 94 }
 				]
 			);
+
+			assert_eq!(
+				session_events(),
+				vec![
+					SessionEvent::NewSession { session_index: 1 },
+					SessionEvent::NewSession { session_index: 2 },
+					SessionEvent::NewSession { session_index: 3 },
+					SessionEvent::ValidatorDisabled { validator: 11 }
+				]
+			);
 		});
 }
 
@@ -3506,19 +3560,14 @@ fn offence_threshold_doesnt_plan_new_era() {
 
 			// we have 4 validators and an offending validator threshold of 1/3,
 			// even if the third validator commits an offence a new era should not be forced
-
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &31);
-
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 			// 11 should be disabled because the byzantine threshold is 1
 			assert!(is_disabled(11));
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-			on_offence_now(&[offence_from(21, None)], &[Perbill::zero()]);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::zero()], true);
 
 			// 21 should not be disabled because the number of disabled validators will be above the
 			// byzantine threshold
@@ -3526,7 +3575,7 @@ fn offence_threshold_doesnt_plan_new_era() {
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-			on_offence_now(&[offence_from(31, None)], &[Perbill::zero()]);
+			on_offence_now(&[offence_from(31, None)], &[Perbill::zero()], true);
 
 			// same for 31
 			assert!(!is_disabled(31));
@@ -3548,10 +3597,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() {
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 			assert_eq!(<Test as Config>::SessionsPerEra::get(), 3);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
-			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true);
 
 			// nominations are not updated.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3565,7 +3611,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() {
 			assert!(is_disabled(21));
 
 			// validator 11 commits an offence
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
 			// nominations are not updated.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3681,11 +3727,9 @@ fn zero_slash_keeps_nominators() {
 			mock::start_active_era(1);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
-
-			let exposure = Staking::eras_stakers(active_era(), &11);
 			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
@@ -4701,7 +4745,7 @@ fn offences_weight_calculated_correctly() {
 			>,
 		> = (1..10)
 			.map(|i| OffenceDetails {
-				offender: (i, i),
+				offender: (i, ()),
 				reporters: vec![],
 			})
 			.collect();
@@ -6749,7 +6793,7 @@ mod staking_interface {
 	#[test]
 	fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() {
 		ExtBuilder::default().build_and_execute(|| {
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 			assert_eq!(Staking::bonded(&11), Some(11));
 
@@ -7033,7 +7077,7 @@ mod staking_unchecked {
 				let exposed_nominator = initial_exposure.others.first().unwrap().value;
 
 				// 11 goes offline
-				on_offence_now(&[offence_from(11, None)], &[slash_percent]);
+				on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 				let slash_amount = slash_percent * exposed_stake;
 				let validator_share =
@@ -7099,7 +7143,7 @@ mod staking_unchecked {
 				let nominator_stake = Staking::ledger(101.into()).unwrap().total;
 
 				// 11 goes offline
-				on_offence_now(&[offence_from(11, None)], &[slash_percent]);
+				on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 				// both stakes must have been decreased to 0.
 				assert_eq!(Staking::ledger(101.into()).unwrap().active, 0);
@@ -8011,39 +8055,20 @@ mod ledger_recovery {
 }
 
 mod byzantine_threshold_disabling_strategy {
-	use crate::{
-		tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy,
-	};
+	use crate::tests::{DisablingStrategy, Test, UpToLimitDisablingStrategy};
 	use sp_runtime::Perbill;
-	use sp_staking::{offence::OffenceSeverity, EraIndex};
+	use sp_staking::offence::OffenceSeverity;
 
 	// Common test data - the stash of the offending validator, the era of the offence and the
 	// active set
 	const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
 	const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
 	const MIN_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
-	const SLASH_ERA: EraIndex = 1;
 	const ACTIVE_SET: [<Test as pallet_session::Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
 	const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set
 
-	#[test]
-	fn dont_disable_for_ancient_offence() {
-		sp_io::TestExternalities::default().execute_with(|| {
-			let initially_disabled = vec![];
-			pallet_session::Validators::<Test>::put(ACTIVE_SET.to_vec());
-			ActiveEra::<Test>::put(ActiveEraInfo { index: 2, start: None });
-
-			let disabling_decision =
-				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
-					&OFFENDER_ID,
-					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
-					&initially_disabled,
-				);
-
-			assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
-		});
-	}
+	// todo(ank4n): Ensure there is a test that for older eras, the disabling strategy does not
+	// disable the validator.
 
 	#[test]
 	fn dont_disable_beyond_byzantine_threshold() {
@@ -8055,7 +8080,6 @@ mod byzantine_threshold_disabling_strategy {
 				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8073,7 +8097,6 @@ mod byzantine_threshold_disabling_strategy {
 				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8083,41 +8106,18 @@ mod byzantine_threshold_disabling_strategy {
 }
 
 mod disabling_strategy_with_reenabling {
-	use crate::{
-		tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy,
-		UpToLimitWithReEnablingDisablingStrategy,
-	};
+	use crate::tests::{DisablingStrategy, Test, UpToLimitWithReEnablingDisablingStrategy};
 	use sp_runtime::Perbill;
-	use sp_staking::{offence::OffenceSeverity, EraIndex};
+	use sp_staking::offence::OffenceSeverity;
 
 	// Common test data - the stash of the offending validator, the era of the offence and the
 	// active set
 	const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
 	const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
 	const LOW_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
-	const SLASH_ERA: EraIndex = 1;
 	const ACTIVE_SET: [<Test as pallet_session::Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
 	const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set
 
-	#[test]
-	fn dont_disable_for_ancient_offence() {
-		sp_io::TestExternalities::default().execute_with(|| {
-			let initially_disabled = vec![];
-			pallet_session::Validators::<Test>::put(ACTIVE_SET.to_vec());
-			ActiveEra::<Test>::put(ActiveEraInfo { index: 2, start: None });
-
-			let disabling_decision =
-				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
-					&OFFENDER_ID,
-					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
-					&initially_disabled,
-				);
-
-			assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
-		});
-	}
-
 	#[test]
 	fn disable_when_below_byzantine_threshold() {
 		sp_io::TestExternalities::default().execute_with(|| {
@@ -8128,7 +8128,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8148,7 +8147,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8169,7 +8167,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					LOW_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8187,7 +8184,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8208,7 +8204,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8230,7 +8225,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8251,7 +8245,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					LOW_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8270,7 +8263,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8291,13 +8283,9 @@ fn reenable_lower_offenders_mock() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31);
-
 			// offence with a low slash
-			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]);
-			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(20)]);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(20)], true);
 
 			// it does NOT affect the nominator.
 			assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]);
@@ -8307,7 +8295,7 @@ fn reenable_lower_offenders_mock() {
 			assert!(is_disabled(21));
 
 			// offence with a higher slash
-			on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(50)]);
+			on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(50)], true);
 
 			// First offender is no longer disabled
 			assert!(!is_disabled(11));
@@ -8322,35 +8310,42 @@ fn reenable_lower_offenders_mock() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 100 },
 					Event::Slashed { staker: 101, amount: 12 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(20),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 200 },
 					Event::Slashed { staker: 101, amount: 75 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 31,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 31 },
-					Event::ValidatorReenabled { stash: 11 },
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 },
 					Event::Slashed { staker: 31, amount: 250 },
 				]
 			);
+
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+					SessionEvent::ValidatorDisabled { validator: 31 },
+					SessionEvent::ValidatorReenabled { validator: 11 },
+				]
+			));
 		});
 }
 
@@ -8366,14 +8361,11 @@ fn do_not_reenable_higher_offenders_mock() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31);
-
 			// offence with a major slash
 			on_offence_now(
 				&[offence_from(11, None), offence_from(21, None), offence_from(31, None)],
 				&[Perbill::from_percent(50), Perbill::from_percent(50), Perbill::from_percent(10)],
+				true,
 			);
 
 			// both validators should be disabled
@@ -8389,22 +8381,20 @@ fn do_not_reenable_higher_offenders_mock() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 31,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
 					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 },
 					Event::Slashed { staker: 31, amount: 50 },
@@ -8416,6 +8406,15 @@ fn do_not_reenable_higher_offenders_mock() {
 					Event::Slashed { staker: 101, amount: 62 },
 				]
 			);
+
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+				]
+			));
 		});
 }
 
@@ -9009,3 +9008,409 @@ mod hold_migration {
 		});
 	}
 }
+
+mod paged_slashing {
+	use super::*;
+	use crate::slashing::OffenceRecord;
+
+	#[test]
+	fn offence_processed_in_multi_block() {
+		// Ensure each page is processed only once.
+		ExtBuilder::default()
+			.has_stakers(false)
+			.slash_defer_duration(3)
+			.build_and_execute(|| {
+				let base_stake = 1000;
+
+				// Create a validator:
+				bond_validator(11, base_stake);
+				assert_eq!(Validators::<Test>::count(), 1);
+
+				// Track the total exposure of 11.
+				let mut exposure_counter = base_stake;
+
+				// Exposure page size is 64, hence it creates 4 pages of exposure.
+				let expected_page_count = 4;
+				for i in 0..200 {
+					let bond_amount = base_stake + i as Balance;
+					bond_nominator(1000 + i, bond_amount, vec![11]);
+					// with multi page reward payout, payout exposure is same as total exposure.
+					exposure_counter += bond_amount;
+				}
+
+				mock::start_active_era(1);
+
+				assert_eq!(
+					ErasStakersOverview::<Test>::get(1, 11).expect("exposure should exist"),
+					PagedExposureMetadata {
+						total: exposure_counter,
+						own: base_stake,
+						page_count: expected_page_count,
+						nominator_count: 200,
+					}
+				);
+
+				mock::start_active_era(2);
+				System::reset_events();
+
+				// report an offence for 11 in era 1.
+				on_offence_in_era(
+					&[offence_from(11, None)],
+					&[Perbill::from_percent(10)],
+					1,
+					false,
+				);
+
+				// ensure offence is queued.
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::OffenceReported {
+						validator: 11,
+						fraction: Perbill::from_percent(10),
+						offence_era: 1
+					}]
+				);
+
+				// ensure offence queue has items.
+				assert_eq!(
+					OffenceQueue::<Test>::get(1, 11).unwrap(),
+					slashing::OffenceRecord {
+						reporter: None,
+						reported_era: 2,
+						// first page to be marked for processing.
+						exposure_page: expected_page_count - 1,
+						slash_fraction: Perbill::from_percent(10),
+						prior_slash_fraction: Perbill::zero(),
+					}
+				);
+
+				// The offence era is noted in the queue.
+				assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![1]);
+
+				// ensure ProcessingOffence is still empty.
+				assert_eq!(ProcessingOffence::<Test>::get(), None);
+
+				// ensure no unapplied slashes for era 4 (offence_era + slash_defer_duration).
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 0);
+
+				// Checkpoint 1: advancing to next block will compute the first page of slash.
+				advance_blocks(1);
+
+				// ensure the last page of offence is processed.
+				// (offence is processed in reverse order of pages)
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::SlashComputed {
+						offence_era: 1,
+						slash_era: 4,
+						offender: 11,
+						page: expected_page_count - 1
+					},]
+				);
+
+				// offender is removed from offence queue
+				assert_eq!(OffenceQueue::<Test>::get(1, 11), None);
+
+				// offence era is removed from queue.
+				assert_eq!(OffenceQueueEras::<Test>::get(), None);
+
+				// this offence is not completely processed yet, so it should be in processing.
+				assert_eq!(
+					ProcessingOffence::<Test>::get(),
+					Some((
+						1,
+						11,
+						OffenceRecord {
+							reporter: None,
+							reported_era: 2,
+							// page 3 is processed, next page to be processed is 2.
+							exposure_page: 2,
+							slash_fraction: Perbill::from_percent(10),
+							prior_slash_fraction: Perbill::zero(),
+						}
+					))
+				);
+
+				// unapplied slashes for era 4.
+				let slashes = UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>();
+				// only one unapplied slash exists.
+				assert_eq!(slashes.len(), 1);
+				let (slash_key, unapplied_slash) = &slashes[0];
+				// this is a unique key to ensure an unapplied slash is not overwritten for multiple
+				// offences by the same offender in the same era.
+				assert_eq!(*slash_key, (11, Perbill::from_percent(10), expected_page_count - 1));
+
+				// validator own stake is only included in the first page. Since this is page 3,
+				// only nominators are slashed.
+				assert_eq!(unapplied_slash.own, 0);
+				assert_eq!(unapplied_slash.validator, 11);
+				assert_eq!(unapplied_slash.others.len(), 200 % 64);
+
+				// Checkpoint 2: advancing to next block will compute the second page of slash.
+				advance_blocks(1);
+
+				// offence queue still empty
+				assert_eq!(OffenceQueue::<Test>::get(1, 11), None);
+				assert_eq!(OffenceQueueEras::<Test>::get(), None);
+
+				// processing offence points to next page.
+				assert_eq!(
+					ProcessingOffence::<Test>::get(),
+					Some((
+						1,
+						11,
+						OffenceRecord {
+							reporter: None,
+							reported_era: 2,
+							// page 2 is processed, next page to be processed is 1.
+							exposure_page: 1,
+							slash_fraction: Perbill::from_percent(10),
+							prior_slash_fraction: Perbill::zero(),
+						}
+					))
+				);
+
+				// there are two unapplied slashes for era 4.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 2);
+
+				// ensure the second-last page of the offence is processed.
+				// (offence is processed in reverse order of pages)
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::SlashComputed {
+						offence_era: 1,
+						slash_era: 4,
+						offender: 11,
+						page: expected_page_count - 2
+					},]
+				);
+
+				// Checkpoint 3: advancing to two more blocks will complete the processing of the
+				// reported offence
+				advance_blocks(2);
+
+				// no processing offence.
+				assert!(ProcessingOffence::<Test>::get().is_none());
+				// total of 4 unapplied slash.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 4);
+
+				// Checkpoint 4: lets verify the application of slashes in multiple blocks.
+				// advance to era 4.
+				mock::start_active_era(4);
+				// slashes are not applied just yet. From the next block onwards, they will be applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 4);
+
+				// advance to next block.
+				advance_blocks(1);
+				// 1 slash is applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 3);
+
+				// advance two blocks.
+				advance_blocks(2);
+				// 2 more slashes are applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 1);
+
+				// advance one more block.
+				advance_blocks(1);
+				// all slashes are applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 0);
+
+				// ensure all stakers are slashed correctly.
+				assert_eq!(asset::staked::<Test>(&11), 1000 - 100);
+
+				for i in 0..200 {
+					let original_stake = 1000 + i as Balance;
+					let expected_slash = Perbill::from_percent(10) * original_stake;
+					assert_eq!(asset::staked::<Test>(&(1000 + i)), original_stake - expected_slash);
+				}
+			})
+	}
+
+	#[test]
+	fn offence_discarded_correctly() {
+		ExtBuilder::default().slash_defer_duration(3).build_and_execute(|| {
+			start_active_era(2);
+
+			// Scenario 1: 11 commits an offence in era 2.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, false);
+
+			// offence is queued, not processed yet.
+			let queued_offence_one = OffenceQueue::<Test>::get(2, 11).unwrap();
+			assert_eq!(queued_offence_one.slash_fraction, Perbill::from_percent(10));
+			assert_eq!(queued_offence_one.prior_slash_fraction, Perbill::zero());
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// Scenario 1A: 11 commits a second offence in era 2 with **lower** slash fraction than
+			// the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(5)], 2, false);
+
+			// the second offence is discarded. No change in the queue.
+			assert_eq!(OffenceQueue::<Test>::get(2, 11).unwrap(), queued_offence_one);
+
+			// Scenario 1B: 11 commits a second offence in era 2 with **higher** slash fraction than
+			// the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(15)], 2, false);
+
+			// the second offence overwrites the first offence.
+			let overwritten_offence = OffenceQueue::<Test>::get(2, 11).unwrap();
+			assert!(overwritten_offence.slash_fraction > queued_offence_one.slash_fraction);
+			assert_eq!(overwritten_offence.slash_fraction, Perbill::from_percent(15));
+			assert_eq!(overwritten_offence.prior_slash_fraction, Perbill::zero());
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// Scenario 2: 11 commits another offence in era 2, but after the previous offence is
+			// processed.
+			advance_blocks(1);
+			assert!(OffenceQueue::<Test>::get(2, 11).is_none());
+			assert!(OffenceQueueEras::<Test>::get().is_none());
+			// unapplied slash is created for the offence.
+			assert!(UnappliedSlashes::<Test>::contains_key(
+				2 + 3,
+				(11, Perbill::from_percent(15), 0)
+			));
+
+			// Scenario 2A: offence has **lower** slash fraction than the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(14)], 2, false);
+			// offence is discarded.
+			assert!(OffenceQueue::<Test>::get(2, 11).is_none());
+			assert!(OffenceQueueEras::<Test>::get().is_none());
+
+			// Scenario 2B: offence has **higher** slash fraction than the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(16)], 2, false);
+			// process offence
+			advance_blocks(1);
+			// there are now two slash records for 11, for era 5, with the newer one only slashing
+			// the diff between slash fractions of 16 and 15.
+			let slash_one =
+				UnappliedSlashes::<Test>::get(2 + 3, (11, Perbill::from_percent(15), 0)).unwrap();
+			let slash_two =
+				UnappliedSlashes::<Test>::get(2 + 3, (11, Perbill::from_percent(16), 0)).unwrap();
+			assert!(slash_one.own > slash_two.own);
+		});
+	}
+
+	#[test]
+	fn offence_eras_queued_correctly() {
+		ExtBuilder::default().build_and_execute(|| {
+			// 11 and 21 are validators.
+			assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator);
+			assert_eq!(Staking::status(&21).unwrap(), StakerStatus::Validator);
+
+			start_active_era(2);
+
+			// 11 and 21 commits offence in era 2.
+			on_offence_in_era(
+				&[offence_from(11, None), offence_from(21, None)],
+				&[Perbill::from_percent(10), Perbill::from_percent(20)],
+				2,
+				false,
+			);
+
+			// 11 and 21 commits offence in era 1 but reported after the era 2 offence.
+			on_offence_in_era(
+				&[offence_from(11, None), offence_from(21, None)],
+				&[Perbill::from_percent(10), Perbill::from_percent(20)],
+				1,
+				false,
+			);
+
+			// queued offence eras are sorted.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![1, 2]);
+
+			// next two blocks, the offence in era 1 is processed.
+			advance_blocks(2);
+
+			// only era 2 is left in the queue.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// next block, the offence in era 2 is processed.
+			advance_blocks(1);
+
+			// era still exists in the queue.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// next block, the era 2 is processed.
+			advance_blocks(1);
+
+			// queue is empty.
+			assert_eq!(OffenceQueueEras::<Test>::get(), None);
+		});
+	}
+	#[test]
+	fn non_deferred_slash_applied_instantly() {
+		ExtBuilder::default().build_and_execute(|| {
+			mock::start_active_era(2);
+			let validator_stake = asset::staked::<Test>(&11);
+			let slash_fraction = Perbill::from_percent(10);
+			let expected_slash = slash_fraction * validator_stake;
+			System::reset_events();
+
+			// report an offence for 11 in era 1.
+			on_offence_in_era(&[offence_from(11, None)], &[slash_fraction], 1, false);
+
+			// ensure offence is queued.
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![Event::OffenceReported {
+					validator: 11,
+					fraction: Perbill::from_percent(10),
+					offence_era: 1
+				}]
+			);
+
+			// process offence
+			advance_blocks(1);
+
+			// ensure slash is computed and applied.
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
+					Event::Slashed { staker: 11, amount: expected_slash },
+					// this is the nominator of 11.
+					Event::Slashed { staker: 101, amount: 12 },
+				]
+			);
+
+			// ensure validator is slashed.
+			assert_eq!(asset::staked::<Test>(&11), validator_stake - expected_slash);
+		});
+	}
+
+	#[test]
+	fn validator_with_no_exposure_slashed() {
+		ExtBuilder::default().build_and_execute(|| {
+			let validator_stake = asset::staked::<Test>(&11);
+			let slash_fraction = Perbill::from_percent(10);
+			let expected_slash = slash_fraction * validator_stake;
+
+			// only 101 nominates 11, lets remove them.
+			assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![21]));
+
+			start_active_era(2);
+			// ensure validator has no exposure.
+			assert_eq!(ErasStakersOverview::<Test>::get(2, 11).unwrap().page_count, 0,);
+
+			// clear events
+			System::reset_events();
+
+			// report an offence for 11.
+			on_offence_now(&[offence_from(11, None)], &[slash_fraction], true);
+
+			// ensure validator is slashed.
+			assert_eq!(asset::staked::<Test>(&11), validator_stake - expected_slash);
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![
+					Event::OffenceReported {
+						offence_era: 2,
+						validator: 11,
+						fraction: slash_fraction
+					},
+					Event::SlashComputed { offence_era: 2, slash_era: 2, offender: 11, page: 0 },
+					Event::Slashed { staker: 11, amount: expected_slash },
+				]
+			);
+		});
+	}
+}
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 92fe0e176a2e6cd6383ac23fd7a48d41135887f6..36b7be744986634102e94b19e80678611a4a6e28 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -84,6 +84,7 @@ pub trait WeightInfo {
 	fn set_min_commission() -> Weight;
 	fn restore_ledger() -> Weight;
 	fn migrate_currency() -> Weight;
+	fn apply_slash() -> Weight;
 }
 
 /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
@@ -815,6 +816,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 			.saturating_add(T::DbWeight::get().reads(6_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
 
 // For backwards compatibility and tests.
@@ -1545,4 +1550,8 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(6_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
index 6e475b7067e16e8d22eddc1861db63aac470d0ce..1418ea0b6f0a404b28c0ddb17eedcff92c0415c1 100644
--- a/substrate/frame/state-trie-migration/src/lib.rs
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -106,6 +106,7 @@ pub mod pallet {
 		CloneNoBound,
 		Encode,
 		Decode,
+		DecodeWithMemTracking,
 		scale_info::TypeInfo,
 		PartialEqNoBound,
 		EqNoBound,
@@ -127,7 +128,16 @@ pub mod pallet {
 	/// A migration task stored in state.
 	///
 	/// It tracks the last top and child keys read.
-	#[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq, MaxEncodedLen)]
+	#[derive(
+		Clone,
+		Encode,
+		Decode,
+		DecodeWithMemTracking,
+		scale_info::TypeInfo,
+		PartialEq,
+		Eq,
+		MaxEncodedLen,
+	)]
 	#[scale_info(skip_type_params(T))]
 	pub struct MigrationTask<T: Config> {
 		/// The current top trie migration progress.
@@ -404,6 +414,7 @@ pub mod pallet {
 		Copy,
 		Encode,
 		Decode,
+		DecodeWithMemTracking,
 		scale_info::TypeInfo,
 		Default,
 		Debug,
@@ -419,7 +430,17 @@ pub mod pallet {
 	}
 
 	/// How a migration was computed.
-	#[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Debug, PartialEq, Eq)]
+	#[derive(
+		Clone,
+		Copy,
+		Encode,
+		Decode,
+		DecodeWithMemTracking,
+		scale_info::TypeInfo,
+		Debug,
+		PartialEq,
+		Eq,
+	)]
 	pub enum MigrationCompute {
 		/// A signed origin triggered the migration.
 		Signed,
diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs
index 14bc2667def179cc0d403287acca811939824bbe..92284f370a5f33f8449996e2d10db4d471f24c0e 100644
--- a/substrate/frame/support/src/dispatch.rs
+++ b/substrate/frame/support/src/dispatch.rs
@@ -19,7 +19,7 @@
 //! generating values representing lazy module function calls.
 
 use crate::traits::UnfilteredDispatchable;
-use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen};
 use core::fmt;
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
@@ -72,7 +72,17 @@ pub trait CheckIfFeeless {
 }
 
 /// Origin for the System pallet.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)]
+#[derive(
+	PartialEq,
+	Eq,
+	Clone,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum RawOrigin<AccountId> {
 	/// The system itself ordained this dispatch to happen: this is the highest privilege level.
 	Root,
@@ -135,7 +145,9 @@ pub trait PaysFee<T> {
 }
 
 /// Explicit enum to denote if a transaction pays fee or not.
-#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)]
+#[derive(
+	Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, DecodeWithMemTracking, TypeInfo,
+)]
 pub enum Pays {
 	/// Transactor will pay related fees.
 	Yes,
@@ -170,7 +182,9 @@ impl From<bool> for Pays {
 /// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(
+	PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo,
+)]
 pub enum DispatchClass {
 	/// A normal dispatch.
 	Normal,
@@ -291,7 +305,18 @@ pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &Dispa
 
 /// Weight information that is only available post dispatch.
 /// NOTE: This can only be used to reduce the weight or fee, not increase it.
-#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)]
+#[derive(
+	Clone,
+	Copy,
+	Eq,
+	PartialEq,
+	Default,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+)]
 pub struct PostDispatchInfo {
 	/// Actual weight consumed by a call or `None` which stands for the worst case static weight.
 	pub actual_weight: Option<Weight>,
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index 825628fb01edb1600efbd379faac4f0665621f93..86c7330d275de3cfa722fd9ba789a42a1a730dec 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -914,7 +914,7 @@ pub mod pallet_prelude {
 		Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity,
 		PartialEqNoBound, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat,
 	};
-	pub use codec::{Decode, Encode, MaxEncodedLen};
+	pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 	pub use core::marker::PhantomData;
 	pub use frame_support::pallet_macros::*;
 	pub use frame_support_procedural::{inject_runtime_type, register_default_impl};
diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs
index d28716237119e8972b7069a806a82cbca0c26169..f2bbb2fdadabe25e4d647c0344d81b4a9ceb11dc 100644
--- a/substrate/frame/support/src/traits/messages.rs
+++ b/substrate/frame/support/src/traits/messages.rs
@@ -18,7 +18,7 @@
 //! Traits for managing message queuing and handling.
 
 use super::storage::Footprint;
-use codec::{Decode, Encode, FullCodec, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, MaxEncodedLen};
 use core::{fmt::Debug, marker::PhantomData};
 use scale_info::TypeInfo;
 use sp_core::{ConstU32, Get, TypedGet};
@@ -27,7 +27,7 @@ use sp_weights::{Weight, WeightMeter};
 
 /// Errors that can happen when attempting to process a message with
 /// [`ProcessMessage::process_message()`].
-#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)]
+#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, TypeInfo, Debug)]
 pub enum ProcessMessageError {
 	/// The message data format is unknown (e.g. unrecognised header)
 	BadFormat,
diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs
index 6e46a7489654ee61b76bf3da219fcafec4333acd..e0c76b8bdff817245214bcf0024989615a5de280 100644
--- a/substrate/frame/support/src/traits/preimages.rs
+++ b/substrate/frame/support/src/traits/preimages.rs
@@ -18,7 +18,7 @@
 //! Stuff for dealing with hashed preimages.
 
 use alloc::borrow::Cow;
-use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_core::RuntimeDebug;
 use sp_runtime::{
@@ -31,7 +31,17 @@ pub type BoundedInline = crate::BoundedVec<u8, ConstU32<128>>;
 /// The maximum we expect a single legacy hash lookup to be.
 const MAX_LEGACY_LEN: u32 = 1_000_000;
 
-#[derive(Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, RuntimeDebug)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	Clone,
+	Eq,
+	PartialEq,
+	TypeInfo,
+	RuntimeDebug,
+)]
 #[codec(mel_bound())]
 pub enum Bounded<T, H: Hash> {
 	/// A hash with no preimage length. We do not support creation of this except
diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs
index a302e28d4ce24fe4c6c1b7cc7a4fcb79d0e107c2..2bf53d5f45a98e0984df51fd8a6a7084f03bad9e 100644
--- a/substrate/frame/support/src/traits/schedule.rs
+++ b/substrate/frame/support/src/traits/schedule.rs
@@ -20,7 +20,7 @@
 #[allow(deprecated)]
 use super::PreimageProvider;
 use alloc::vec::Vec;
-use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen};
 use core::{fmt::Debug, result::Result};
 use scale_info::TypeInfo;
 use sp_runtime::{traits::Saturating, DispatchError, RuntimeDebug};
@@ -35,7 +35,18 @@ pub type Period<BlockNumber> = (BlockNumber, u32);
 pub type Priority = u8;
 
 /// The dispatch time of a scheduled task.
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum DispatchTime<BlockNumber> {
 	/// At specified block.
 	At(BlockNumber),
diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
index 5cb1d0a9e7b0758504ad92a0a6698ad1f652e078..f8d476d517fe4d36427a579841b285cb89ed61ad 100644
--- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
+++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
@@ -20,7 +20,7 @@
 //!
 //! See the [`crate::traits::fungible`] doc for more information about fungible traits.
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::cmp::Ordering;
 use frame_support::traits::{
 	fungible::imbalance,
@@ -40,7 +40,9 @@ use sp_runtime::{
 
 /// The `NativeOrWithId` enum classifies an asset as either `Native` to the current chain or as an
 /// asset with a specific ID.
-#[derive(Decode, Encode, Default, MaxEncodedLen, TypeInfo, Clone, RuntimeDebug, Eq)]
+#[derive(
+	Decode, DecodeWithMemTracking, Encode, Default, MaxEncodedLen, TypeInfo, Clone, RuntimeDebug, Eq,
+)]
 pub enum NativeOrWithId<AssetId>
 where
 	AssetId: Ord,
diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs
index 52d3e8c014b317261746371d8ff99acc66f2a517..4978016603dcbdc2b58e4489404023356aaa2952 100644
--- a/substrate/frame/support/src/traits/tokens/misc.rs
+++ b/substrate/frame/support/src/traits/tokens/misc.rs
@@ -18,7 +18,7 @@
 //! Miscellaneous types.
 
 use crate::{traits::Contains, TypeInfo};
-use codec::{Decode, Encode, FullCodec, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, MaxEncodedLen};
 use core::fmt::Debug;
 use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero};
 use sp_core::RuntimeDebug;
@@ -178,7 +178,16 @@ pub enum ExistenceRequirement {
 
 /// Status of funds.
 #[derive(
-	PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
+	PartialEq,
+	Eq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	scale_info::TypeInfo,
+	MaxEncodedLen,
 )]
 pub enum BalanceStatus {
 	/// Funds are free, as corresponding to `free` item in Balances.
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 8980c6d6c8f427eedd924a8d2cfb555635b18cf7..9a21c65902983d446e20a42049da8c3cc8632954 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -122,7 +122,7 @@ use sp_runtime::{
 };
 use sp_version::RuntimeVersion;
 
-use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, EncodeLike, FullCodec, MaxEncodedLen};
 #[cfg(feature = "std")]
 use frame_support::traits::BuildGenesisConfig;
 use frame_support::{
@@ -267,7 +267,18 @@ where
 /// Information about the dispatch of a call, to be displayed in the
 /// [`ExtrinsicSuccess`](Event::ExtrinsicSuccess) and [`ExtrinsicFailed`](Event::ExtrinsicFailed)
 /// events.
-#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)]
+#[derive(
+	Clone,
+	Copy,
+	Eq,
+	PartialEq,
+	Default,
+	RuntimeDebug,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+)]
 pub struct DispatchEventInfo {
 	/// Weight of this transaction.
 	pub weight: Weight,
diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs
index 281012ffb4c956ed31913d45332e0f7b7ce742d9..0f1897872cc1f27a2c9275f4879e8f9e1823c455 100644
--- a/substrate/frame/treasury/src/lib.rs
+++ b/substrate/frame/treasury/src/lib.rs
@@ -324,7 +324,7 @@ pub mod pallet {
 
 	/// The count of spends that have been made.
 	#[pallet::storage]
-	pub(crate) type SpendCount<T, I = ()> = StorageValue<_, SpendIndex, ValueQuery>;
+	pub type SpendCount<T, I = ()> = StorageValue<_, SpendIndex, ValueQuery>;
 
 	/// Spends that have been approved and being processed.
 	// Hasher: Twox safe since `SpendIndex` is an internal count based index.
@@ -345,7 +345,7 @@ pub mod pallet {
 
 	/// The blocknumber for the last triggered spend period.
 	#[pallet::storage]
-	pub(crate) type LastSpendPeriod<T, I = ()> = StorageValue<_, BlockNumberFor<T, I>, OptionQuery>;
+	pub type LastSpendPeriod<T, I = ()> = StorageValue<_, BlockNumberFor<T, I>, OptionQuery>;
 
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs
index d543f447ca7a541c579ad8c4e01e2c07eac5099a..a34095a2ffdd2463f1a40c030645ee33a17806e2 100644
--- a/substrate/frame/tx-pause/src/mock.rs
+++ b/substrate/frame/tx-pause/src/mock.rs
@@ -58,6 +58,7 @@ impl pallet_utility::Config for Test {
 	PartialOrd,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	RuntimeDebug,
 	MaxEncodedLen,
 	scale_info::TypeInfo,
diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs
index 84f122c08bb7b4c10ee177d78dcfb5e69501e201..d2fa0adb11704bdd423155951fabb1de38851d04 100644
--- a/substrate/frame/uniques/src/lib.rs
+++ b/substrate/frame/uniques/src/lib.rs
@@ -45,7 +45,7 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use frame_support::traits::{
 	tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency,
 };
@@ -182,7 +182,7 @@ pub mod pallet {
 	#[pallet::storage]
 	/// The items held by any given account; set out this way so that items owned by a single
 	/// account can be enumerated.
-	pub(super) type Account<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Account<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::AccountId>, // owner
@@ -197,7 +197,7 @@ pub mod pallet {
 	#[pallet::storage_prefix = "ClassAccount"]
 	/// The collections owned by any given account; set out this way so that collections owned by
 	/// a single account can be enumerated.
-	pub(super) type CollectionAccount<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type CollectionAccount<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AccountId,
@@ -246,7 +246,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Attributes of a collection.
-	pub(super) type Attribute<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Attribute<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::CollectionId>,
@@ -271,7 +271,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Keeps track of the number of items a collection might have.
-	pub(super) type CollectionMaxSupply<T: Config<I>, I: 'static = ()> =
+	pub type CollectionMaxSupply<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/uniques/src/types.rs b/substrate/frame/uniques/src/types.rs
index e2e170c72f21ae65912147546f789db74b60550b..b17703223ca6ff9e1bf1431f58cefb59b38dcb80 100644
--- a/substrate/frame/uniques/src/types.rs
+++ b/substrate/frame/uniques/src/types.rs
@@ -25,16 +25,15 @@ use frame_support::{
 use scale_info::TypeInfo;
 
 /// A type alias for handling balance deposits.
-pub(super) type DepositBalanceOf<T, I = ()> =
+pub type DepositBalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
 /// A type alias representing the details of a collection.
-pub(super) type CollectionDetailsFor<T, I> =
+pub type CollectionDetailsFor<T, I> =
 	CollectionDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
 /// A type alias for the details of a single item.
-pub(super) type ItemDetailsFor<T, I> =
-	ItemDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
+pub type ItemDetailsFor<T, I> = ItemDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
 /// A type alias to represent the price of an item.
-pub(super) type ItemPrice<T, I = ()> =
+pub type ItemPrice<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
 
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
@@ -63,7 +62,18 @@ pub struct CollectionDetails<AccountId, DepositBalance> {
 }
 
 /// Witness data for the destroy transactions.
-#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Copy,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub struct DestroyWitness {
 	/// The total number of outstanding items of this collection.
 	#[codec(compact)]
diff --git a/substrate/frame/utility/src/benchmarking.rs b/substrate/frame/utility/src/benchmarking.rs
index 261d52436889a1033dd4f576c25c070a8944112c..a329815836ba696957c0f0bf9c8b5ab972c3d9f0 100644
--- a/substrate/frame/utility/src/benchmarking.rs
+++ b/substrate/frame/utility/src/benchmarking.rs
@@ -92,6 +92,18 @@ mod benchmark {
 		assert_last_event::<T>(Event::BatchCompleted.into());
 	}
 
+	#[benchmark]
+	fn dispatch_as_fallible() {
+		let caller = account("caller", SEED, SEED);
+		let call = Box::new(frame_system::Call::remark { remark: vec![] }.into());
+		let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into();
+		let pallets_origin = origin.caller().clone();
+		let pallets_origin = T::PalletsOrigin::from(pallets_origin);
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, Box::new(pallets_origin), call);
+	}
+
 	#[benchmark]
 	fn if_else() {
 		// Failing main call.
diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs
index 63a02febb94c51b6c7f2fe1eff07ae0f6f1f767f..03b193052c37add48192096c470b3c1a11e85e12 100644
--- a/substrate/frame/utility/src/lib.rs
+++ b/substrate/frame/utility/src/lib.rs
@@ -554,6 +554,34 @@ pub mod pallet {
 				post_info: Some(weight).into(),
 			})
 		}
+
+		/// Dispatches a function call with a provided origin.
+		///
+		/// Almost the same as [`Pallet::dispatch_as`] but forwards any error of the inner call.
+		///
+		/// The dispatch origin for this call must be _Root_.
+		#[pallet::call_index(7)]
+		#[pallet::weight({
+			let dispatch_info = call.get_dispatch_info();
+			(
+				T::WeightInfo::dispatch_as_fallible()
+					.saturating_add(dispatch_info.call_weight),
+				dispatch_info.class,
+			)
+		})]
+		pub fn dispatch_as_fallible(
+			origin: OriginFor<T>,
+			as_origin: Box<T::PalletsOrigin>,
+			call: Box<<T as Config>::RuntimeCall>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			call.dispatch_bypass_filter((*as_origin).into()).map_err(|e| e.error)?;
+
+			Self::deposit_event(Event::DispatchedAs { result: Ok(()) });
+
+			Ok(())
+		}
 	}
 
 	impl<T: Config> Pallet<T> {
diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs
index 759621907dfeda5028298cc2dadee190212dcda5..56e7e3c21ea24853313ab38e3c3a2ff1a3f578bc 100644
--- a/substrate/frame/utility/src/tests.rs
+++ b/substrate/frame/utility/src/tests.rs
@@ -262,6 +262,14 @@ fn call_foobar(err: bool, start_weight: Weight, end_weight: Option<Weight>) -> R
 	RuntimeCall::Example(ExampleCall::foobar { err, start_weight, end_weight })
 }
 
+fn utility_events() -> Vec<Event> {
+	System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::Utility(inner) = e { Some(inner) } else { None })
+		.collect()
+}
+
 #[test]
 fn as_derivative_works() {
 	new_test_ext().execute_with(|| {
@@ -916,6 +924,33 @@ fn with_weight_works() {
 	})
 }
 
+#[test]
+fn dispatch_as_works() {
+	new_test_ext().execute_with(|| {
+		Balances::force_set_balance(RuntimeOrigin::root(), 666, 100).unwrap();
+		assert_eq!(Balances::free_balance(666), 100);
+		assert_eq!(Balances::free_balance(777), 0);
+		assert_ok!(Utility::dispatch_as(
+			RuntimeOrigin::root(),
+			Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(666))),
+			Box::new(call_transfer(777, 100))
+		));
+		assert_eq!(Balances::free_balance(666), 0);
+		assert_eq!(Balances::free_balance(777), 100);
+
+		System::reset_events();
+		assert_ok!(Utility::dispatch_as(
+			RuntimeOrigin::root(),
+			Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(777))),
+			Box::new(RuntimeCall::Timestamp(TimestampCall::set { now: 0 }))
+		));
+		assert_eq!(
+			utility_events(),
+			vec![Event::DispatchedAs { result: Err(DispatchError::BadOrigin) }]
+		);
+	})
+}
+
 #[test]
 fn if_else_with_root_works() {
 	new_test_ext().execute_with(|| {
@@ -983,6 +1018,31 @@ fn if_else_successful_main_call() {
 	})
 }
 
+#[test]
+fn dispatch_as_fallible_works() {
+	new_test_ext().execute_with(|| {
+		Balances::force_set_balance(RuntimeOrigin::root(), 666, 100).unwrap();
+		assert_eq!(Balances::free_balance(666), 100);
+		assert_eq!(Balances::free_balance(777), 0);
+		assert_ok!(Utility::dispatch_as_fallible(
+			RuntimeOrigin::root(),
+			Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(666))),
+			Box::new(call_transfer(777, 100))
+		));
+		assert_eq!(Balances::free_balance(666), 0);
+		assert_eq!(Balances::free_balance(777), 100);
+
+		assert_noop!(
+			Utility::dispatch_as_fallible(
+				RuntimeOrigin::root(),
+				Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(777))),
+				Box::new(RuntimeCall::Timestamp(TimestampCall::set { now: 0 }))
+			),
+			DispatchError::BadOrigin,
+		);
+	})
+}
+
 #[test]
 fn if_else_failing_fallback_call() {
 	new_test_ext().execute_with(|| {
diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs
index 30922bbb22d5b30cd2f48c2eec667b51c2f8ce15..ce57af72f91424ffe6c1d2ca74700c45b3617664 100644
--- a/substrate/frame/utility/src/weights.rs
+++ b/substrate/frame/utility/src/weights.rs
@@ -56,6 +56,7 @@ pub trait WeightInfo {
 	fn batch_all(c: u32, ) -> Weight;
 	fn dispatch_as() -> Weight;
 	fn force_batch(c: u32, ) -> Weight;
+	fn dispatch_as_fallible() -> Weight;
 	fn if_else() -> Weight;
 }
 
@@ -126,6 +127,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 			.saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 	}
+	fn dispatch_as_fallible() -> Weight {
+		Weight::MAX
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
@@ -202,6 +207,11 @@ impl WeightInfo for () {
 			.saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 	}
+
+	fn dispatch_as_fallible() -> Weight {
+		Weight::MAX
+	}
+
 	fn if_else() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs
index 15f8d397f81c9c345348a16bca290d6ab86837cc..357f454519dd9b4ac28407445c48429f7d344c5c 100644
--- a/substrate/frame/vesting/src/lib.rs
+++ b/substrate/frame/vesting/src/lib.rs
@@ -59,7 +59,7 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{fmt::Debug, marker::PhantomData};
 use frame_support::{
 	dispatch::DispatchResult,
@@ -96,7 +96,7 @@ const VESTING_ID: LockIdentifier = *b"vesting ";
 // A value placed in storage that represents the current version of the Vesting storage.
 // This value is used by `on_runtime_upgrade` to determine whether we run storage migration logic.
 #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
-enum Releases {
+pub enum Releases {
 	V0,
 	V1,
 }
@@ -179,7 +179,28 @@ pub mod pallet {
 		/// the unvested amount.
 		type UnvestedFundsAllowedWithdrawReasons: Get<WithdrawReasons>;
 
-		/// Provider for the block number.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+	/// - a remote block number, e.g. from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+	/// - Parachains with a reliable block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>;
 
 		/// Maximum number of vesting schedules an account may have at a given moment.
@@ -214,7 +235,7 @@ pub mod pallet {
 	///
 	/// New networks start with latest version, as determined by the genesis build.
 	#[pallet::storage]
-	pub(crate) type StorageVersion<T: Config> = StorageValue<_, Releases, ValueQuery>;
+	pub type StorageVersion<T: Config> = StorageValue<_, Releases, ValueQuery>;
 
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
diff --git a/substrate/frame/vesting/src/vesting_info.rs b/substrate/frame/vesting/src/vesting_info.rs
index 5d5ae31fc324743eeb71ed0d898d91885d3cf73c..fd26ff3e6b77ea87fd40c40ddf489e6978af81b8 100644
--- a/substrate/frame/vesting/src/vesting_info.rs
+++ b/substrate/frame/vesting/src/vesting_info.rs
@@ -20,7 +20,18 @@
 use super::*;
 
 /// Struct to encode the vesting schedule of an individual account.
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+)]
 pub struct VestingInfo<Balance, BlockNumber> {
 	/// Locked amount at genesis.
 	locked: Balance,
diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs
index a8eb6b786a31779a55690117818bb137cd9b192d..818062717cfceda4577fe385cf77497c261f81a8 100644
--- a/substrate/primitives/application-crypto/src/lib.rs
+++ b/substrate/primitives/application-crypto/src/lib.rs
@@ -251,6 +251,7 @@ macro_rules! app_crypto_public_full_crypto {
 				Clone, Eq, Hash, PartialEq, PartialOrd, Ord,
 				$crate::codec::Encode,
 				$crate::codec::Decode,
+				$crate::codec::DecodeWithMemTracking,
 				$crate::RuntimeDebug,
 				$crate::codec::MaxEncodedLen,
 				$crate::scale_info::TypeInfo,
@@ -287,6 +288,7 @@ macro_rules! app_crypto_public_not_full_crypto {
 				Clone, Eq, Hash, PartialEq, Ord, PartialOrd,
 				$crate::codec::Encode,
 				$crate::codec::Decode,
+				$crate::codec::DecodeWithMemTracking,
 				$crate::RuntimeDebug,
 				$crate::codec::MaxEncodedLen,
 				$crate::scale_info::TypeInfo,
@@ -432,6 +434,7 @@ macro_rules! app_crypto_signature_full_crypto {
 			#[derive(Clone, Eq, PartialEq,
 				$crate::codec::Encode,
 				$crate::codec::Decode,
+				$crate::codec::DecodeWithMemTracking,
 				$crate::RuntimeDebug,
 				$crate::scale_info::TypeInfo,
 			)]
@@ -466,6 +469,7 @@ macro_rules! app_crypto_signature_not_full_crypto {
 			#[derive(Clone, Eq, PartialEq,
 				$crate::codec::Encode,
 				$crate::codec::Decode,
+				$crate::codec::DecodeWithMemTracking,
 				$crate::RuntimeDebug,
 				$crate::scale_info::TypeInfo,
 			)]
diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs
index c4e9259c5fc94e3494a4df2bacc36fcdb562e006..9b5ef81917ca2ee208038dbda00ca8e8f0b5e330 100644
--- a/substrate/primitives/arithmetic/src/fixed_point.rs
+++ b/substrate/primitives/arithmetic/src/fixed_point.rs
@@ -52,7 +52,7 @@ use crate::{
 	},
 	PerThing, Perbill, Rounding, SignedRounding,
 };
-use codec::{CompactAs, Decode, Encode};
+use codec::{CompactAs, Decode, DecodeWithMemTracking, Encode};
 use core::{
 	fmt::Debug,
 	ops::{self, Add, Div, Mul, Sub},
@@ -404,6 +404,7 @@ macro_rules! implement_fixed {
 		#[derive(
 			Encode,
 			Decode,
+			DecodeWithMemTracking,
 			CompactAs,
 			Default,
 			Copy,
diff --git a/substrate/primitives/arithmetic/src/lib.rs b/substrate/primitives/arithmetic/src/lib.rs
index 01c403a7c4af284a2e6eaec65a5e5e820be9163e..7ac51543ba333d2971d9a87995b049c216050856 100644
--- a/substrate/primitives/arithmetic/src/lib.rs
+++ b/substrate/primitives/arithmetic/src/lib.rs
@@ -55,14 +55,25 @@ use alloc::vec::Vec;
 use core::{cmp::Ordering, fmt::Debug};
 use traits::{BaseArithmetic, One, SaturatedConversion, Unsigned, Zero};
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
 /// Arithmetic errors.
-#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Debug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum ArithmeticError {
 	/// Underflow.
diff --git a/substrate/primitives/arithmetic/src/per_things.rs b/substrate/primitives/arithmetic/src/per_things.rs
index f73dbe30cec17ea56b2c349d057df92b76edf724..6d0394cd1da54ddc063330b2000d8ef561400b22 100644
--- a/substrate/primitives/arithmetic/src/per_things.rs
+++ b/substrate/primitives/arithmetic/src/per_things.rs
@@ -58,7 +58,7 @@ use crate::traits::{
 	BaseArithmetic, Bounded, CheckedAdd, CheckedMul, CheckedSub, One, SaturatedConversion,
 	Saturating, UniqueSaturatedInto, Unsigned, Zero,
 };
-use codec::{CompactAs, Encode};
+use codec::{CompactAs, DecodeWithMemTracking, Encode};
 use core::{
 	fmt, ops,
 	ops::{Add, Sub},
@@ -592,7 +592,7 @@ macro_rules! implement_per_thing {
 		///
 		#[doc = $title]
 		#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-		#[derive(Encode, Copy, Clone, PartialEq, Eq, codec::MaxEncodedLen, PartialOrd, Ord, scale_info::TypeInfo)]
+		#[derive(Encode, DecodeWithMemTracking, Copy, Clone, PartialEq, Eq, codec::MaxEncodedLen, PartialOrd, Ord, scale_info::TypeInfo)]
 		pub struct $name($type);
 
 		/// Implementation makes any compact encoding of `PerThing::Inner` valid,
diff --git a/substrate/primitives/consensus/babe/src/digests.rs b/substrate/primitives/consensus/babe/src/digests.rs
index e7af8c5763a0f93223c749cea6b21313f46ba430..6516a3c0fd8eb3b67947f0ac0ac042d70cb5582c 100644
--- a/substrate/primitives/consensus/babe/src/digests.rs
+++ b/substrate/primitives/consensus/babe/src/digests.rs
@@ -27,7 +27,7 @@ use alloc::vec::Vec;
 use sp_core::sr25519::vrf::VrfSignature;
 use sp_runtime::{DigestItem, RuntimeDebug};
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 
 /// Raw BABE primary slot assignment pre-digest.
@@ -139,7 +139,15 @@ pub struct NextEpochDescriptor {
 /// Information about the next epoch config, if changed. This is broadcast in the first
 /// block of the epoch, and applies using the same rules as `NextEpochDescriptor`.
 #[derive(
-	Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, MaxEncodedLen, scale_info::TypeInfo,
+	Decode,
+	DecodeWithMemTracking,
+	Encode,
+	PartialEq,
+	Eq,
+	Clone,
+	RuntimeDebug,
+	MaxEncodedLen,
+	scale_info::TypeInfo,
 )]
 pub enum NextConfigDescriptor {
 	/// Version 1.
diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs
index 163fbafa8dd4c6c80a37fe2b6bf820c24c9a6065..b29785b1c4463c39e2a93f51773bf5b422e798fd 100644
--- a/substrate/primitives/consensus/babe/src/lib.rs
+++ b/substrate/primitives/consensus/babe/src/lib.rs
@@ -27,7 +27,7 @@ pub mod inherents;
 
 #[cfg(not(feature = "std"))]
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -219,7 +219,18 @@ impl BabeConfiguration {
 }
 
 /// Types of allowed slots.
-#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Clone,
+	Copy,
+	PartialEq,
+	Eq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum AllowedSlots {
 	/// Only allow primary slots.
diff --git a/substrate/primitives/consensus/beefy/src/commitment.rs b/substrate/primitives/consensus/beefy/src/commitment.rs
index 5d4338aca10f9cee1cf13548d9904f326cb728bc..49547ed877ecc3c5579b28646008c0eea45d844f 100644
--- a/substrate/primitives/consensus/beefy/src/commitment.rs
+++ b/substrate/primitives/consensus/beefy/src/commitment.rs
@@ -16,7 +16,7 @@
 // limitations under the License.
 
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode, Error, Input};
+use codec::{Decode, DecodeWithMemTracking, Encode, Error, Input};
 use core::cmp;
 use scale_info::TypeInfo;
 use sp_application_crypto::RuntimeAppPublic;
@@ -50,7 +50,7 @@ impl<TAuthorityId: Clone, TSignature: Clone> KnownSignature<&TAuthorityId, &TSig
 /// height [block_number](Commitment::block_number).
 /// GRANDPA validators collect signatures on commitments and a stream of such signed commitments
 /// (see [SignedCommitment]) forms the BEEFY protocol.
-#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)]
+#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, DecodeWithMemTracking, TypeInfo)]
 pub struct Commitment<TBlockNumber> {
 	///  A collection of payloads to be signed, see [`Payload`] for details.
 	///
diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index 0f57cdfc810420176c9354fd2bb29662f2a198cc..40bf158df87463dfd273c7e6f10c4f7b9a1b5de6 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -47,7 +47,7 @@ pub use commitment::{Commitment, KnownSignature, SignedCommitment, VersionedFina
 pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider};
 
 use alloc::vec::Vec;
-use codec::{Codec, Decode, Encode};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode};
 use core::fmt::{Debug, Display};
 use scale_info::TypeInfo;
 use sp_application_crypto::{AppPublic, RuntimeAppPublic};
@@ -298,7 +298,7 @@ pub enum ConsensusLog<AuthorityId: Codec> {
 /// A vote message is a direct vote created by a BEEFY node on every voting round
 /// and is gossiped to its peers.
 // TODO: Remove `Signature` generic type, instead get it from `Id::Signature`.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo)]
 pub struct VoteMessage<Number, Id, Signature> {
 	/// Commit to information extracted from a finalized block
 	pub commitment: Commitment<Number>,
@@ -313,7 +313,7 @@ pub struct VoteMessage<Number, Id, Signature> {
 /// One type of misbehavior in BEEFY happens when an authority votes in the same round/block
 /// for different payloads.
 /// Proving is achieved by collecting the signed commitments of conflicting votes.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo)]
 pub struct DoubleVotingProof<Number, Id, Signature> {
 	/// The first vote in the equivocation.
 	pub first: VoteMessage<Number, Id, Signature>,
@@ -340,7 +340,7 @@ impl<Number, Id, Signature> DoubleVotingProof<Number, Id, Signature> {
 ///
 /// Proving is achieved by providing a proof that contains relevant info about the canonical chain
 /// at `commitment.block_number`. The `commitment` can be checked against this info.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo)]
 pub struct ForkVotingProof<Header: HeaderT, Id: RuntimeAppPublic, AncestryProof> {
 	/// The equivocated vote.
 	pub vote: VoteMessage<Header::Number, Id, Id::Signature>,
@@ -364,7 +364,7 @@ impl<Header: HeaderT, Id: RuntimeAppPublic> ForkVotingProof<Header, Id, OpaqueVa
 }
 
 /// Proof showing that an authority voted for a future block.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo)]
 pub struct FutureBlockVotingProof<Number, Id: RuntimeAppPublic> {
 	/// The equivocated vote.
 	pub vote: VoteMessage<Number, Id, Id::Signature>,
diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs
index 2bc96548bdfff345292e8b9e12f735581a965828..82ca1ad21f024f362ecd783aa58b932fcff82329 100644
--- a/substrate/primitives/consensus/beefy/src/payload.rs
+++ b/substrate/primitives/consensus/beefy/src/payload.rs
@@ -16,7 +16,7 @@
 // limitations under the License.
 
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use scale_info::TypeInfo;
 use sp_runtime::traits::Block;
 
@@ -39,7 +39,19 @@ pub mod known_payloads {
 /// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected
 /// value. Duplicated identifiers are disallowed. It's okay for different implementations to only
 /// support a subset of possible values.
-#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)]
+#[derive(
+	Decode,
+	DecodeWithMemTracking,
+	Encode,
+	Debug,
+	PartialEq,
+	Eq,
+	Clone,
+	Ord,
+	PartialOrd,
+	Hash,
+	TypeInfo,
+)]
 pub struct Payload(Vec<(BeefyPayloadId, Vec<u8>)>);
 
 impl Payload {
diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs
index d86ea0992c2e1c1beb134dddd057b45484d9891c..3aa4dc2b4cfb32a084a6b15c3ab385b21468a526 100644
--- a/substrate/primitives/consensus/grandpa/src/lib.rs
+++ b/substrate/primitives/consensus/grandpa/src/lib.rs
@@ -25,7 +25,7 @@ extern crate alloc;
 use serde::Serialize;
 
 use alloc::vec::Vec;
-use codec::{Codec, Decode, Encode};
+use codec::{Codec, Decode, DecodeWithMemTracking, Encode};
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use sp_keystore::KeystorePtr;
@@ -231,7 +231,7 @@ impl<N: Codec> ConsensusLog<N> {
 /// GRANDPA happens when a voter votes on the same round (either at prevote or
 /// precommit stage) for different blocks. Proving is achieved by collecting the
 /// signed messages of conflicting votes.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, Eq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, Eq, TypeInfo)]
 pub struct EquivocationProof<H, N> {
 	set_id: SetId,
 	equivocation: Equivocation<H, N>,
@@ -265,7 +265,7 @@ impl<H, N> EquivocationProof<H, N> {
 
 /// Wrapper object for GRANDPA equivocation proofs, useful for unifying prevote
 /// and precommit equivocations under a common type.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, Eq, TypeInfo)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, Eq, TypeInfo)]
 pub enum Equivocation<H, N> {
 	/// Proof of equivocation at prevote stage.
 	Prevote(
diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs
index d7880c4de9e8fec6d5feb4345af90214f3ac76bc..09baf678f16de43c09d7dd8bf70b5f2fd53cd6ef 100644
--- a/substrate/primitives/consensus/sassafras/src/lib.rs
+++ b/substrate/primitives/consensus/sassafras/src/lib.rs
@@ -24,7 +24,7 @@
 extern crate alloc;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_core::crypto::KeyTypeId;
 use sp_runtime::{ConsensusEngineId, RuntimeDebug};
@@ -86,7 +86,17 @@ pub type Randomness = [u8; RANDOMNESS_LENGTH];
 ///
 /// Mostly tweaks to the ticketing system parameters.
 #[derive(
-	Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default,
+	Copy,
+	Clone,
+	PartialEq,
+	Eq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	RuntimeDebug,
+	MaxEncodedLen,
+	TypeInfo,
+	Default,
 )]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct EpochConfiguration {
diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs
index fd025f1d53eaffc01c615233d8000da220d6ea7c..1083530117cfe094ef0e6ee4dfc48a323a891181 100644
--- a/substrate/primitives/consensus/sassafras/src/ticket.rs
+++ b/substrate/primitives/consensus/sassafras/src/ticket.rs
@@ -18,7 +18,7 @@
 //! Primitives related to tickets.
 
 use crate::vrf::RingVrfSignature;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 
 pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature};
@@ -33,7 +33,9 @@ pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSign
 pub type TicketId = u128;
 
 /// Ticket data persisted on-chain.
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Debug, Clone, PartialEq, Eq, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, TypeInfo,
+)]
 pub struct TicketBody {
 	/// Attempt index.
 	pub attempt_idx: u32,
@@ -47,7 +49,9 @@ pub struct TicketBody {
 pub type TicketSignature = RingVrfSignature;
 
 /// Ticket envelope used on during submission.
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Debug, Clone, PartialEq, Eq, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, TypeInfo,
+)]
 pub struct TicketEnvelope {
 	/// Ticket body.
 	pub body: TicketBody,
diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs
index dfa46fcf2571f3681e7de48995bef035daba0ba2..21f6f2e95b1bd057707ec360b43f485e16dc4ec8 100644
--- a/substrate/primitives/consensus/slots/src/lib.rs
+++ b/substrate/primitives/consensus/slots/src/lib.rs
@@ -19,12 +19,25 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_timestamp::Timestamp;
 
 /// Unit type wrapper that represents a slot.
-#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, Hash, TypeInfo)]
+#[derive(
+	Debug,
+	Encode,
+	MaxEncodedLen,
+	Decode,
+	DecodeWithMemTracking,
+	Eq,
+	Clone,
+	Copy,
+	Default,
+	Ord,
+	Hash,
+	TypeInfo,
+)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 #[repr(transparent)]
 pub struct Slot(u64);
@@ -172,7 +185,7 @@ impl SlotDuration {
 /// produces more than one block on the same slot. The proof of equivocation
 /// are the given distinct headers that were signed by the validator and which
 /// include the slot number.
-#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo, Eq)]
+#[derive(Clone, Debug, Decode, DecodeWithMemTracking, Encode, PartialEq, TypeInfo, Eq)]
 pub struct EquivocationProof<Header, Id> {
 	/// Returns the authority id of the equivocator.
 	pub offender: Id,
diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs
index 25bf4657030fbcddcc75a419941a247759e0f9d5..0ede8119ce3d9ea65b9b4cceb919dc946fbf5567 100644
--- a/substrate/primitives/core/src/bandersnatch.rs
+++ b/substrate/primitives/core/src/bandersnatch.rs
@@ -28,7 +28,7 @@ use crate::crypto::{
 };
 
 use bandersnatch_vrfs::{CanonicalSerialize, SecretKey};
-use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, EncodeLike, MaxEncodedLen};
 use scale_info::TypeInfo;
 
 use alloc::{vec, vec::Vec};
@@ -227,6 +227,22 @@ pub mod vrf {
 		}
 	}
 
+	// `VrfPreOutput` resolves to:
+	// ```
+	// pub struct Affine<P: SWCurveConfig> {
+	//     #[doc(hidden)]
+	//     pub x: P::BaseField,
+	//     #[doc(hidden)]
+	//     pub y: P::BaseField,
+	//     #[doc(hidden)]
+	//     pub infinity: bool,
+	// }
+	// ```
+	// where each `P::BaseField` contains a `pub struct BigInt<const N: usize>(pub [u64; N]);`
+	// Since none of these structures is allocated on the heap, we don't need any special
+	// memory tracking logic. We can simply implement `DecodeWithMemTracking`.
+	impl DecodeWithMemTracking for VrfPreOutput {}
+
 	impl EncodeLike for VrfPreOutput {}
 
 	impl MaxEncodedLen for VrfPreOutput {
@@ -657,7 +673,9 @@ pub mod ring_vrf {
 	}
 
 	/// Ring VRF signature.
-	#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
+	#[derive(
+		Clone, Debug, PartialEq, Eq, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, TypeInfo,
+	)]
 	pub struct RingVrfSignature {
 		/// Ring signature.
 		pub signature: [u8; RING_SIGNATURE_SERIALIZED_SIZE],
diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs
index cf24861e233c1c855fd36d9a752699cdac5a42b1..4838b48c0cd28b410a34e5ed38fb6e61e9c2723e 100644
--- a/substrate/primitives/core/src/crypto.rs
+++ b/substrate/primitives/core/src/crypto.rs
@@ -22,7 +22,7 @@ use alloc::{format, str, vec::Vec};
 #[cfg(all(not(feature = "std"), feature = "serde"))]
 use alloc::{string::String, vec};
 use bip39::{Language, Mnemonic};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::hash::Hash;
 #[doc(hidden)]
 pub use core::ops::Deref;
@@ -501,7 +501,18 @@ pub trait Public: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync
 pub trait Signature: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync {}
 
 /// An opaque 32-byte cryptographic identifier.
-#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Clone,
+	Eq,
+	PartialEq,
+	Ord,
+	PartialOrd,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+)]
 #[cfg_attr(feature = "std", derive(Hash))]
 pub struct AccountId32([u8; 32]);
 
diff --git a/substrate/primitives/core/src/crypto_bytes.rs b/substrate/primitives/core/src/crypto_bytes.rs
index e5130e6d50079f3b9c8e8797f89e1c04e636679b..1964109cfac82eccaee1811ec35feac2eaad1c0e 100644
--- a/substrate/primitives/core/src/crypto_bytes.rs
+++ b/substrate/primitives/core/src/crypto_bytes.rs
@@ -22,7 +22,7 @@ use crate::{
 	hash::{H256, H512},
 };
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::marker::PhantomData;
 use scale_info::TypeInfo;
 
@@ -47,7 +47,7 @@ pub use signature_bytes::*;
 /// The tag `T` is held in a `PhantomData<fn() ->T>`, a trick allowing
 /// `CryptoBytes` to be `Send` and `Sync` regardless of `T` properties
 /// ([ref](https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns)).
-#[derive(Encode, Decode, MaxEncodedLen)]
+#[derive(Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]
 #[repr(transparent)]
 pub struct CryptoBytes<const N: usize, T = ()>(pub [u8; N], PhantomData<fn() -> T>);
 
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index 454f61df79419b76960ae6d1e3419fb0a919320a..db2dfcb7a6e29e9e3c2b8af8b1a1ef6ecc8cce5e 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -35,7 +35,7 @@ extern crate alloc;
 
 use alloc::vec::Vec;
 #[doc(hidden)]
-pub use codec::{Decode, Encode, MaxEncodedLen};
+pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::Deref;
 use scale_info::TypeInfo;
 #[cfg(feature = "serde")]
@@ -178,6 +178,7 @@ impl Deref for OpaqueMetadata {
 	PartialOrd,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	RuntimeDebug,
 	PassByInner,
 	TypeInfo,
@@ -324,7 +325,17 @@ pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
 
 /// The void type - it cannot exist.
 // Oh rust, you crack me up...
-#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Clone,
+	Decode,
+	DecodeWithMemTracking,
+	Encode,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 pub enum Void {}
 
 /// Macro for creating `Maybe*` marker traits.
diff --git a/substrate/primitives/merkle-mountain-range/src/lib.rs b/substrate/primitives/merkle-mountain-range/src/lib.rs
index 061e5dbb6c7d896ea2e18e1cb1b07beaff7c670d..53158476cf1074c1f01d4706341eef8ae0d396fb 100644
--- a/substrate/primitives/merkle-mountain-range/src/lib.rs
+++ b/substrate/primitives/merkle-mountain-range/src/lib.rs
@@ -26,6 +26,7 @@ pub use mmr_lib;
 
 #[cfg(not(feature = "std"))]
 use alloc::vec::Vec;
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use core::fmt;
 use scale_info::TypeInfo;
 use sp_debug_derive::RuntimeDebug;
@@ -362,7 +363,7 @@ pub struct LeafProof<Hash> {
 }
 
 /// An MMR ancestry proof for a prior mmr root.
-#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)]
+#[derive(Encode, Decode, DecodeWithMemTracking, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)]
 pub struct AncestryProof<Hash> {
 	/// Peaks of the ancestor's mmr
 	pub prev_peaks: Vec<Hash>,
diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs
index 412cb78ce84f14cf7f805446d99e210514daef1a..d03a7ddad2685a7412592dbc3c854e27bd0b65ba 100644
--- a/substrate/primitives/metadata-ir/src/unstable.rs
+++ b/substrate/primitives/metadata-ir/src/unstable.rs
@@ -20,7 +20,8 @@
 use crate::{
 	DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR,
 	PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR,
-	PalletStorageMetadataIR, StorageEntryMetadataIR,
+	PalletStorageMetadataIR, PalletViewFunctionMethodMetadataIR,
+	PalletViewFunctionMethodParamMetadataIR, StorageEntryMetadataIR,
 };
 
 use super::types::{
@@ -31,9 +32,10 @@ use super::types::{
 use frame_metadata::v16::{
 	CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums,
 	PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata,
-	PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata,
-	RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16,
-	StorageEntryMetadata, TransactionExtensionMetadata,
+	PalletEventMetadata, PalletMetadata, PalletStorageMetadata, PalletViewFunctionMetadata,
+	PalletViewFunctionParamMetadata, RuntimeApiMetadata, RuntimeApiMethodMetadata,
+	RuntimeApiMethodParamMetadata, RuntimeMetadataV16, StorageEntryMetadata,
+	TransactionExtensionMetadata,
 };
 
 impl From<MetadataIR> for RuntimeMetadataV16 {
@@ -85,8 +87,7 @@ impl From<PalletMetadataIR> for PalletMetadata {
 			name: ir.name,
 			storage: ir.storage.map(Into::into),
 			calls: ir.calls.map(Into::into),
-			// TODO: add with the new v16 release of frame-metadata
-			// view_functions: ir.view_functions.into_iter().map(Into::into).collect(),
+			view_functions: ir.view_functions.into_iter().map(Into::into).collect(),
 			event: ir.event.map(Into::into),
 			constants: ir.constants.into_iter().map(Into::into).collect(),
 			error: ir.error.map(Into::into),
@@ -144,6 +145,25 @@ impl From<PalletCallMetadataIR> for PalletCallMetadata {
 	}
 }
 
+impl From<PalletViewFunctionMethodMetadataIR> for PalletViewFunctionMetadata {
+	fn from(ir: PalletViewFunctionMethodMetadataIR) -> Self {
+		PalletViewFunctionMetadata {
+			name: ir.name,
+			id: ir.id,
+			inputs: ir.inputs.into_iter().map(Into::into).collect(),
+			output: ir.output,
+			docs: ir.docs.into_iter().map(Into::into).collect(),
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<PalletViewFunctionMethodParamMetadataIR> for PalletViewFunctionParamMetadata {
+	fn from(ir: PalletViewFunctionMethodParamMetadataIR) -> Self {
+		PalletViewFunctionParamMetadata { name: ir.name, ty: ir.ty }
+	}
+}
+
 impl From<PalletConstantMetadataIR> for PalletConstantMetadata {
 	fn from(ir: PalletConstantMetadataIR) -> Self {
 		PalletConstantMetadata {
diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs
index 12d4c5948ed9c1c56bf36c05823fcd7e1736d406..6fcdd38d4010789faac961e2568d72ee4cc45ec1 100644
--- a/substrate/primitives/npos-elections/src/lib.rs
+++ b/substrate/primitives/npos-elections/src/lib.rs
@@ -77,7 +77,7 @@
 extern crate alloc;
 
 use alloc::{collections::btree_map::BTreeMap, rc::Rc, vec, vec::Vec};
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::{cell::RefCell, cmp::Ordering};
 use scale_info::TypeInfo;
 #[cfg(feature = "serde")]
@@ -111,7 +111,14 @@ pub use traits::{IdentifierT, PerThing128};
 
 /// The errors that might occur in this crate and `frame-election-provider-solution-type`.
 #[derive(
-	Eq, PartialEq, RuntimeDebug, Clone, codec::Encode, codec::Decode, scale_info::TypeInfo,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	Clone,
+	codec::Encode,
+	codec::Decode,
+	codec::DecodeWithMemTracking,
+	scale_info::TypeInfo,
 )]
 pub enum Error {
 	/// While going from solution indices to ratio, the weight of all the edges has gone above the
@@ -147,7 +154,19 @@ pub type ExtendedBalance = u128;
 /// 1. `minimal_stake`.
 /// 2. `sum_stake`.
 /// 3. `sum_stake_squared`.
-#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, Debug, Default)]
+#[derive(
+	Clone,
+	Copy,
+	PartialEq,
+	Eq,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	Debug,
+	Default,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct ElectionScore {
 	/// The minimal winner, in terms of total backing stake.
@@ -433,7 +452,7 @@ pub struct ElectionResult<AccountId, P: PerThing> {
 ///
 /// This, at the current version, resembles the `Exposure` defined in the Staking pallet, yet they
 /// do not necessarily have to be the same.
-#[derive(RuntimeDebug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
+#[derive(RuntimeDebug, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct Support<AccountId> {
 	/// Total support.
diff --git a/substrate/primitives/runtime/src/generic/digest.rs b/substrate/primitives/runtime/src/generic/digest.rs
index 5ed0c7075cae653839961ab8b910fdcd8b88e0ae..593fbdd66c4775b5c28155912eb77f3a12431ca4 100644
--- a/substrate/primitives/runtime/src/generic/digest.rs
+++ b/substrate/primitives/runtime/src/generic/digest.rs
@@ -25,7 +25,7 @@ use codec::DecodeAll;
 use serde::{Deserialize, Serialize};
 
 use crate::{
-	codec::{Decode, Encode, Error, Input},
+	codec::{Decode, DecodeWithMemTracking, Encode, Error, Input},
 	scale_info::{
 		build::{Fields, Variants},
 		Path, Type, TypeInfo,
@@ -35,7 +35,9 @@ use crate::{
 use sp_core::RuntimeDebug;
 
 /// Generic header digest.
-#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default)]
+#[derive(
+	PartialEq, Eq, Clone, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo, Default,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct Digest {
 	/// A list of logs in the digest.
@@ -71,7 +73,7 @@ impl Digest {
 
 /// Digest item that is able to encode/decode 'system' digest items and
 /// provide opaque access to other items.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
+#[derive(PartialEq, Eq, Clone, DecodeWithMemTracking, RuntimeDebug)]
 pub enum DigestItem {
 	/// A pre-runtime digest.
 	///
diff --git a/substrate/primitives/runtime/src/generic/header.rs b/substrate/primitives/runtime/src/generic/header.rs
index d78aa5c8d3c29ea35eacecdbd0c40ebe0bfec926..cde8098404b4479b2c9c0ac401b89eb7e95e8cc7 100644
--- a/substrate/primitives/runtime/src/generic/header.rs
+++ b/substrate/primitives/runtime/src/generic/header.rs
@@ -18,7 +18,7 @@
 //! Generic implementation of a block header.
 
 use crate::{
-	codec::{Codec, Decode, Encode},
+	codec::{Codec, Decode, DecodeWithMemTracking, Encode},
 	generic::Digest,
 	scale_info::TypeInfo,
 	traits::{self, AtLeast32BitUnsigned, BlockNumber, Hash as HashT, MaybeDisplay, Member},
@@ -28,7 +28,9 @@ use serde::{Deserialize, Serialize};
 use sp_core::U256;
 
 /// Abstraction over a block header for a substrate chain.
-#[derive(Encode, Decode, PartialEq, Eq, Clone, sp_core::RuntimeDebug, TypeInfo)]
+#[derive(
+	Encode, Decode, DecodeWithMemTracking, PartialEq, Eq, Clone, sp_core::RuntimeDebug, TypeInfo,
+)]
 #[scale_info(skip_type_params(Hash))]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs
index f0c8e50f1ba1e66993b98badf980d1f385f8591c..cd5dd725c35a76bbb501a8cd12dd433f3d7702cd 100644
--- a/substrate/primitives/runtime/src/lib.rs
+++ b/substrate/primitives/runtime/src/lib.rs
@@ -80,7 +80,7 @@ use sp_core::{
 };
 
 use alloc::vec;
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 
 pub mod curve;
@@ -272,7 +272,17 @@ pub type ConsensusEngineId = [u8; 4];
 
 /// Signature verify that can work with any known signature types.
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[derive(Eq, PartialEq, Clone, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	RuntimeDebug,
+	TypeInfo,
+)]
 pub enum MultiSignature {
 	/// An Ed25519 signature.
 	Ed25519(ed25519::Signature),
@@ -511,7 +521,9 @@ pub type DispatchResult = core::result::Result<(), DispatchError>;
 pub type DispatchResultWithInfo<T> = core::result::Result<T, DispatchErrorWithPostInfo<T>>;
 
 /// Reason why a pallet call failed.
-#[derive(Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo, MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct ModuleError {
 	/// Module index, matching the metadata module index.
@@ -531,7 +543,18 @@ impl PartialEq for ModuleError {
 }
 
 /// Errors related to transactional storage layers.
-#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Debug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum TransactionalError {
 	/// Too many transactional layers have been spawned.
@@ -556,7 +579,18 @@ impl From<TransactionalError> for DispatchError {
 }
 
 /// Reason why a dispatch call failed.
-#[derive(Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo, PartialEq, MaxEncodedLen)]
+#[derive(
+	Eq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Debug,
+	TypeInfo,
+	PartialEq,
+	MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum DispatchError {
 	/// Some error occurred.
@@ -598,7 +632,9 @@ pub enum DispatchError {
 
 /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about
 /// the `Dispatchable` that is only known post dispatch.
-#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(
+	Eq, PartialEq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo,
+)]
 pub struct DispatchErrorWithPostInfo<Info>
 where
 	Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable,
@@ -643,7 +679,18 @@ impl From<crate::traits::BadOrigin> for DispatchError {
 }
 
 /// Description of what went wrong when trying to complete an operation on a token.
-#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Debug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum TokenError {
 	/// Funds are unavailable.
diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs
index 4382405a8ebc67bf8975cb35b99d18b7b213dd2b..5a309af704206e4ea34781073f4ac6c5022059c0 100644
--- a/substrate/primitives/runtime/src/multiaddress.rs
+++ b/substrate/primitives/runtime/src/multiaddress.rs
@@ -18,10 +18,19 @@
 //! MultiAddress type is a wrapper for multiple downstream account formats.
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 
 /// A multi-format address wrapper for on-chain accounts.
-#[derive(Encode, Decode, PartialEq, Eq, Clone, crate::RuntimeDebug, scale_info::TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	PartialEq,
+	Eq,
+	Clone,
+	crate::RuntimeDebug,
+	scale_info::TypeInfo,
+)]
 #[cfg_attr(feature = "std", derive(Hash))]
 pub enum MultiAddress<AccountId, AccountIndex> {
 	/// It's an account ID (pubkey).
diff --git a/substrate/primitives/runtime/src/proving_trie/mod.rs b/substrate/primitives/runtime/src/proving_trie/mod.rs
index 32b2284b4d79d9e7481e8231cda3dfa454c84ffc..bd5ddbe9742923f739e50d161cf2da88e9e44f6a 100644
--- a/substrate/primitives/runtime/src/proving_trie/mod.rs
+++ b/substrate/primitives/runtime/src/proving_trie/mod.rs
@@ -20,14 +20,25 @@
 pub mod base16;
 pub mod base2;
 
-use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo};
+use crate::{Decode, DecodeWithMemTracking, DispatchError, Encode, MaxEncodedLen, TypeInfo};
 #[cfg(feature = "serde")]
 use crate::{Deserialize, Serialize};
 use alloc::vec::Vec;
 use sp_trie::{trie_types::TrieError as SpTrieError, VerifyError};
 
 /// A runtime friendly error type for tries.
-#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Copy,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Debug,
+	TypeInfo,
+	MaxEncodedLen,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum TrieError {
 	/* From TrieError */
diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs
index 1fc78cce6707c1c16f7cab2ef0e73265d8777ef1..3f037eb153b9fb2bf7c64f9b419c3b16654d21b3 100644
--- a/substrate/primitives/runtime/src/testing.rs
+++ b/substrate/primitives/runtime/src/testing.rs
@@ -18,7 +18,7 @@
 //! Testing utilities.
 
 use crate::{
-	codec::{Codec, Decode, Encode, MaxEncodedLen},
+	codec::{Codec, Decode, DecodeWithMemTracking, Encode, MaxEncodedLen},
 	generic::{self, UncheckedExtrinsic},
 	scale_info::TypeInfo,
 	traits::{self, BlakeTwo256, Dispatchable, OpaqueKeys},
@@ -42,6 +42,7 @@ use std::{cell::RefCell, fmt::Debug};
 	Clone,
 	Encode,
 	Decode,
+	DecodeWithMemTracking,
 	Debug,
 	Hash,
 	Serialize,
@@ -165,7 +166,19 @@ impl traits::Verify for UintAuthorityId {
 }
 
 /// A dummy signature type, to match `UintAuthorityId`.
-#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, Encode, Decode, TypeInfo)]
+#[derive(
+	Eq,
+	PartialEq,
+	Clone,
+	Debug,
+	Hash,
+	Serialize,
+	Deserialize,
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	TypeInfo,
+)]
 pub struct TestSignature(pub u64, pub Vec<u8>);
 
 impl traits::Verify for TestSignature {
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 46f17a0fcc6337a9973b13ca03604c9c115f441e..3781120fa4488c07735d7eb39f9063f9e1613ee9 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -2066,6 +2066,7 @@ macro_rules! impl_opaque_keys_inner {
 			Clone, PartialEq, Eq,
 			$crate::codec::Encode,
 			$crate::codec::Decode,
+			$crate::codec::DecodeWithMemTracking,
 			$crate::scale_info::TypeInfo,
 			$crate::RuntimeDebug,
 		)]
diff --git a/substrate/primitives/session/src/lib.rs b/substrate/primitives/session/src/lib.rs
index fe7a38047338d92d18e5427587c7e46bdee11829..3225097b665e6ca5a642b5ee130674f64f9b7134 100644
--- a/substrate/primitives/session/src/lib.rs
+++ b/substrate/primitives/session/src/lib.rs
@@ -21,7 +21,7 @@
 
 extern crate alloc;
 
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 
 #[cfg(feature = "std")]
 use sp_api::ProvideRuntimeApi;
@@ -39,7 +39,17 @@ pub use runtime_api::*;
 pub type ValidatorCount = u32;
 
 /// Proof of membership of a specific key in a given session.
-#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, RuntimeDebug, scale_info::TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	Clone,
+	Eq,
+	PartialEq,
+	Default,
+	RuntimeDebug,
+	scale_info::TypeInfo,
+)]
 pub struct MembershipProof {
 	/// The session index on which the specific key is a member.
 	pub session: SessionIndex,
diff --git a/substrate/primitives/statement-store/src/lib.rs b/substrate/primitives/statement-store/src/lib.rs
index dbac017ff6493e1da04251b33da9510d12eed078..d765ddad3ed8ce1c2591608fe73c5d3d5bfde3e5 100644
--- a/substrate/primitives/statement-store/src/lib.rs
+++ b/substrate/primitives/statement-store/src/lib.rs
@@ -23,7 +23,7 @@
 extern crate alloc;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use scale_info::TypeInfo;
 use sp_application_crypto::RuntimeAppPublic;
 #[cfg(feature = "std")]
@@ -93,7 +93,9 @@ pub fn hash_encoded(data: &[u8]) -> [u8; 32] {
 }
 
 /// Statement proof.
-#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)]
+#[derive(
+	Encode, Decode, DecodeWithMemTracking, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq,
+)]
 pub enum Proof {
 	/// Sr25519 Signature.
 	Sr25519 {
@@ -174,7 +176,16 @@ impl Field {
 }
 
 /// Statement structure.
-#[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)]
+#[derive(
+	DecodeWithMemTracking,
+	TypeInfo,
+	sp_core::RuntimeDebug,
+	PassByCodec,
+	Clone,
+	PartialEq,
+	Eq,
+	Default,
+)]
 pub struct Statement {
 	proof: Option<Proof>,
 	decryption_key: Option<DecryptionKey>,
diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs
index 893b2e33bee6ca21bd900e485d482cb0acba3cc7..52a8703cf6e738cec9e899d57726b0263064a19e 100644
--- a/substrate/primitives/transaction-storage-proof/src/lib.rs
+++ b/substrate/primitives/transaction-storage-proof/src/lib.rs
@@ -25,7 +25,7 @@ extern crate alloc;
 use core::result::Result;
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use sp_inherents::{InherentData, InherentIdentifier, IsFatalError};
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
@@ -54,7 +54,7 @@ impl IsFatalError for InherentError {
 
 /// Holds a chunk of data retrieved from storage along with
 /// a proof that the data was stored at that location in the trie.
-#[derive(Encode, Decode, Clone, PartialEq, Debug, scale_info::TypeInfo)]
+#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, Debug, scale_info::TypeInfo)]
 pub struct TransactionStorageProof {
 	/// Data chunk that is proved to exist.
 	pub chunk: Vec<u8>,
diff --git a/substrate/primitives/weights/src/weight_v2.rs b/substrate/primitives/weights/src/weight_v2.rs
index 0f92e6448ca94485cfc56206d2b0068100c8e049..87e6e69c49f63a53d0e655d32457e49e777fdc3d 100644
--- a/substrate/primitives/weights/src/weight_v2.rs
+++ b/substrate/primitives/weights/src/weight_v2.rs
@@ -15,13 +15,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use codec::{Decode, Encode, MaxEncodedLen};
+use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
 use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign};
 use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero};
 
 use super::*;
 
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, Debug, Default)]
+#[derive(
+	Encode,
+	Decode,
+	DecodeWithMemTracking,
+	MaxEncodedLen,
+	TypeInfo,
+	Eq,
+	PartialEq,
+	Copy,
+	Clone,
+	Debug,
+	Default,
+)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))]
 pub struct Weight {
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 7c092f2851663d2a2807807e5d812a3c0071a71e..b5d3bf2dbbea79dfb4a67d03bc0f7420d5de49b5 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -29,7 +29,7 @@ pub mod substrate_test_pallet;
 
 #[cfg(not(feature = "std"))]
 use alloc::{vec, vec::Vec};
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeWithMemTracking, Encode};
 use frame_support::{
 	construct_runtime, derive_impl,
 	dispatch::DispatchClass,
@@ -135,7 +135,7 @@ pub fn native_version() -> NativeVersion {
 }
 
 /// Transfer data extracted from Extrinsic containing `Balances::transfer_allow_death`.
-#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(Clone, PartialEq, Eq, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo)]
 pub struct TransferData {
 	pub from: AccountId,
 	pub to: AccountId,
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
index 1e9155f59a57a8c9f5f34ef946fd3b39113e7a74..b71e91d422d51b8a37276e08d1312fc1e9b10f81 100644
--- a/templates/parachain/runtime/src/configs/mod.rs
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -274,6 +274,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index 8cd5cf0c838e6987baa662f21f8c36666360643f..9e0a75d40844a49a80796f05027c0e3995b3f852 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -136,6 +136,8 @@ std = [
 	"pallet-session?/std",
 	"pallet-skip-feeless-payment?/std",
 	"pallet-society?/std",
+	"pallet-staking-ah-client?/std",
+	"pallet-staking-rc-client?/std",
 	"pallet-staking-reward-fn?/std",
 	"pallet-staking-runtime-api?/std",
 	"pallet-staking?/std",
@@ -324,6 +326,8 @@ runtime-benchmarks = [
 	"pallet-session-benchmarking?/runtime-benchmarks",
 	"pallet-skip-feeless-payment?/runtime-benchmarks",
 	"pallet-society?/runtime-benchmarks",
+	"pallet-staking-ah-client?/runtime-benchmarks",
+	"pallet-staking-rc-client?/runtime-benchmarks",
 	"pallet-staking?/runtime-benchmarks",
 	"pallet-state-trie-migration?/runtime-benchmarks",
 	"pallet-sudo?/runtime-benchmarks",
@@ -464,6 +468,8 @@ try-runtime = [
 	"pallet-session?/try-runtime",
 	"pallet-skip-feeless-payment?/try-runtime",
 	"pallet-society?/try-runtime",
+	"pallet-staking-ah-client?/try-runtime",
+	"pallet-staking-rc-client?/try-runtime",
 	"pallet-staking?/try-runtime",
 	"pallet-state-trie-migration?/try-runtime",
 	"pallet-statement?/try-runtime",
@@ -552,7 +558,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-assets-holder", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", 
"pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", 
"polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-assets-holder", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", 
"pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-ah-client", "pallet-staking-rc-client", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", 
"polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -1289,6 +1295,16 @@ default-features = false
 optional = true
 path = "../substrate/frame/staking"
 
+[dependencies.pallet-staking-ah-client]
+default-features = false
+optional = true
+path = "../substrate/frame/staking/ah-client"
+
+[dependencies.pallet-staking-rc-client]
+default-features = false
+optional = true
+path = "../substrate/frame/staking/rc-client"
+
 [dependencies.pallet-staking-reward-curve]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 89cd300b418f64d41c75a8919a41f018b63749c4..19f80aac4a451d4d36cd0c58e7c91870fb80e625 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -657,6 +657,16 @@ pub use pallet_society;
 #[cfg(feature = "pallet-staking")]
 pub use pallet_staking;
 
+/// Pallet handling the communication with staking-rc-client. Its role is to glue the staking
+/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way.
+#[cfg(feature = "pallet-staking-ah-client")]
+pub use pallet_staking_ah_client;
+
+/// Pallet handling the communication with staking-ah-client. Its role is to glue the staking
+/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way.
+#[cfg(feature = "pallet-staking-rc-client")]
+pub use pallet_staking_rc_client;
+
 /// Reward Curve for FRAME staking pallet.
 #[cfg(feature = "pallet-staking-reward-curve")]
 pub use pallet_staking_reward_curve;