diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml
index 8af1dd8cef708ec2f374ef9dca7fdeec150254ad..2b68e0112685078b2f189f73d7e85672cc1941a8 100644
--- a/.github/workflows/check-prdoc.yml
+++ b/.github/workflows/check-prdoc.yml
@@ -22,49 +22,33 @@ jobs:
   check-prdoc:
     runs-on: ubuntu-latest
     timeout-minutes: 10
-    if: github.event.pull_request.number != ''
     steps:
       - name: Checkout repo
         uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc #v4.1.7
-      # we cannot show the version in this step (ie before checking out the repo)
-      # due to https://github.com/paritytech/prdoc/issues/15
-      - name: Check if PRdoc is required
-        id: get-labels
+      - name: Check prdoc format
         run: |
           echo "Pulling $IMAGE"
           $ENGINE pull $IMAGE
 
-          # Fetch the labels for the PR under test
-          echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}"
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-          echo "Labels: ${labels}"
-          echo "labels=${labels}" >> "$GITHUB_OUTPUT"
-
           echo "Checking PRdoc version"
           $ENGINE run --rm -v $PWD:/repo $IMAGE --version
 
-      - name: Early exit if PR is silent
-        if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }}
-        run: |
-          hits=$(find prdoc -name "pr_$GITHUB_PR*.prdoc" | wc -l)
-          if (( hits > 0 )); then
-            echo "PR detected as silent, but a PRDoc was found, checking it as information"
-            $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} || echo "Ignoring failure"
-          else
-            echo "PR detected as silent, no PRDoc found, exiting..."
-          fi
-          echo "If you want to add a PRDoc, please refer to $PRDOC_DOC"
-          exit 0
+          echo "Check prdoc format"
+          echo "For PRDoc format, please refer to $PRDOC_DOC"
+          $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check
 
-      - name: PRdoc check for PR#${{ github.event.pull_request.number }}
-        if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }}
+      - name: Check if PRdoc is required
+        if: github.event.pull_request.number != ''
+        id: get-labels
         run: |
-          echo "Checking for PR#${GITHUB_PR}"
-          echo "You can find more information about PRDoc at $PRDOC_DOC"
-          $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR}
+          # Fetch the labels for the PR under test
+          echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}"
+          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
+          echo "Labels: ${labels}"
+          echo "labels=${labels}" >> "$GITHUB_OUTPUT"
 
       - name: Validate prdoc for PR#${{ github.event.pull_request.number }}
-        if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }}
+        if: ${{ github.event.pull_request.number != '' && !contains(steps.get-labels.outputs.labels, 'R0') }}
         run: |
           echo "Validating PR#${GITHUB_PR}"
           python3 --version
diff --git a/Cargo.lock b/Cargo.lock
index 40594efe42d3a509e035238ea8bd89e3e42262eb..c6fefcfdfe186d9c1515d58a8bdfbb435c4937f4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2868,6 +2868,7 @@ version = "1.0.0"
 dependencies = [
  "asset-hub-westend-runtime",
  "bridge-hub-westend-runtime",
+ "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "emulated-integration-tests-common",
  "frame-support 28.0.0",
@@ -8186,6 +8187,19 @@ dependencies = [
  "stable_deref_trait",
 ]
 
+[[package]]
+name = "git2"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fda788993cc341f69012feba8bf45c0ba4f3291fcc08e214b4d5a7332d88aff"
+dependencies = [
+ "bitflags 2.6.0",
+ "libc",
+ "libgit2-sys",
+ "log",
+ "url",
+]
+
 [[package]]
 name = "glob"
 version = "0.3.1"
@@ -8728,7 +8742,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.4.9",
+ "socket2 0.5.7",
  "tokio",
  "tower-service",
  "tracing",
@@ -9832,6 +9846,18 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "libgit2-sys"
+version = "0.18.0+1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1a117465e7e1597e8febea8bb0c410f1c7fb93b1e1cddf34363f8390367ffec"
+dependencies = [
+ "cc",
+ "libc",
+ "libz-sys",
+ "pkg-config",
+]
+
 [[package]]
 name = "libloading"
 version = "0.7.4"
@@ -14954,6 +14980,7 @@ dependencies = [
  "env_logger 0.11.3",
  "ethabi",
  "futures",
+ "git2",
  "hex",
  "jsonrpsee",
  "log",
@@ -15528,6 +15555,42 @@ dependencies = [
  "sp-staking 36.0.0",
 ]
 
+[[package]]
+name = "pallet-staking-ah-client"
+version = "0.1.0"
+dependencies = [
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "pallet-authorship 28.0.0",
+ "pallet-session 28.0.0",
+ "pallet-staking 28.0.0",
+ "pallet-staking-rc-client",
+ "parity-scale-codec",
+ "polkadot-primitives 7.0.0",
+ "polkadot-runtime-parachains 7.0.0",
+ "scale-info",
+ "sp-core 28.0.0",
+ "sp-runtime 31.0.1",
+ "sp-staking 26.0.0",
+ "staging-xcm 7.0.0",
+]
+
+[[package]]
+name = "pallet-staking-rc-client"
+version = "0.1.0"
+dependencies = [
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core 28.0.0",
+ "sp-runtime 31.0.1",
+ "sp-staking 26.0.0",
+ "staging-xcm 7.0.0",
+]
+
 [[package]]
 name = "pallet-staking-reward-curve"
 version = "11.0.0"
@@ -16474,7 +16537,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9"
 dependencies = [
  "bitcoin_hashes 0.13.0",
  "rand",
- "rand_core 0.5.1",
+ "rand_core 0.6.4",
  "serde",
  "unicode-normalization",
 ]
@@ -18853,6 +18916,8 @@ dependencies = [
  "pallet-skip-feeless-payment 3.0.0",
  "pallet-society 28.0.0",
  "pallet-staking 28.0.0",
+ "pallet-staking-ah-client",
+ "pallet-staking-rc-client",
  "pallet-staking-reward-curve",
  "pallet-staking-reward-fn 19.0.0",
  "pallet-staking-runtime-api 14.0.0",
@@ -20736,7 +20801,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302"
 dependencies = [
  "bytes",
  "heck 0.5.0",
- "itertools 0.12.1",
+ "itertools 0.13.0",
  "log",
  "multimap",
  "once_cell",
@@ -20782,7 +20847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac"
 dependencies = [
  "anyhow",
- "itertools 0.12.1",
+ "itertools 0.13.0",
  "proc-macro2 1.0.93",
  "quote 1.0.38",
  "syn 2.0.98",
diff --git a/Cargo.toml b/Cargo.toml
index 987e57d418bf73dca537ded780613535c365236d..0769a95932d443fa31772f376756edc51c0db98a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -420,6 +420,8 @@ members = [
 	"substrate/frame/session/benchmarking",
 	"substrate/frame/society",
 	"substrate/frame/staking",
+	"substrate/frame/staking/ah-client",
+	"substrate/frame/staking/rc-client",
 	"substrate/frame/staking/reward-curve",
 	"substrate/frame/staking/reward-fn",
 	"substrate/frame/staking/runtime-api",
@@ -998,6 +1000,8 @@ pallet-session-benchmarking = { path = "substrate/frame/session/benchmarking", d
 pallet-skip-feeless-payment = { path = "substrate/frame/transaction-payment/skip-feeless-payment", default-features = false }
 pallet-society = { path = "substrate/frame/society", default-features = false }
 pallet-staking = { path = "substrate/frame/staking", default-features = false }
+pallet-staking-ah-client = { path = "substrate/frame/staking/ah-client", default-features = false }
+pallet-staking-rc-client = { path = "substrate/frame/staking/rc-client", default-features = false }
 pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false }
 pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false }
 pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false }
diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs
index 6a97525c4f576f2a956f21be3a89807699423205..002baea02d60d74e47f1508c420aa90461eeb058 100644
--- a/cumulus/pallets/collator-selection/src/mock.rs
+++ b/cumulus/pallets/collator-selection/src/mock.rs
@@ -139,6 +139,7 @@ impl pallet_session::Config for Test {
 	type SessionManager = CollatorSelection;
 	type SessionHandler = TestSessionHandler;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
index f718e7e77f597723c2a53dac3552bb103bab96d9..05c7021d380aeb0eaec892dd00cecc102c836f02 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
@@ -37,6 +37,7 @@ pallet-bridge-messages = { workspace = true }
 # Cumulus
 asset-hub-westend-runtime = { workspace = true }
 bridge-hub-westend-runtime = { workspace = true }
+cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 emulated-integration-tests-common = { workspace = true }
 parachains-common = { workspace = true, default-features = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
index 3d4d4f58e3b54f2baf8fc9dcf5683e9b3be4532f..cd5e22372f0e645469cdee8c8fb47ab901456ea4 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
@@ -61,8 +61,10 @@ mod imports {
 				LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub,
 				UniversalLocation as PenpalUniversalLocation,
 			},
-			PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet,
+			PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner,
+			PenpalBParaPallet as PenpalBPallet,
 		},
+		rococo_emulated_chain::RococoRelayPallet as RococoPallet,
 		westend_emulated_chain::{
 			genesis::ED as WESTEND_ED, westend_runtime::xcm_config::XcmConfig as WestendXcmConfig,
 			WestendRelayPallet as WestendPallet,
@@ -73,10 +75,11 @@ mod imports {
 		AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo,
 		BridgeHubWestendPara as BridgeHubWestend,
 		BridgeHubWestendParaReceiver as BridgeHubWestendReceiver,
-		BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB,
+		BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalAPara as PenpalA,
+		PenpalAParaReceiver as PenpalAReceiver, PenpalBPara as PenpalB,
 		PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender,
-		WestendRelay as Westend, WestendRelayReceiver as WestendReceiver,
-		WestendRelaySender as WestendSender,
+		RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, WestendRelay as Westend,
+		WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender,
 	};
 
 	pub const ASSET_ID: u32 = 1;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
index a73c1280b406a7154c05707a4dfe53946ac9c1c9..6da4de550fb5f5dcf5b949ccd5297fd4e2897f01 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
@@ -14,6 +14,7 @@
 // limitations under the License.
 
 use crate::{create_pool_with_native_on, tests::*};
+use emulated_integration_tests_common::macros::Dmp;
 use xcm::latest::AssetTransferFilter;
 
 fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
@@ -41,6 +42,12 @@ fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr(
 	let wnd_at_westend_parachains = wnd_at_ah_westend();
 	let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo();
 	create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true);
+	create_pool_with_native_on!(
+		AssetHubRococo,
+		wnd_at_asset_hub_rococo.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
 
 	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
 	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
@@ -416,6 +423,295 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo()
 	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
 }
 
+#[test]
+fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo() {
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = PenpalAReceiver::get();
+	let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+	// create foreign WND on remote paras
+	let (wnd_at_westend_parachains, wnd_at_rococo_parachains) =
+		set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount);
+	let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE);
+	// create foreign WND on remote paras
+	PenpalA::force_create_foreign_asset(
+		wnd_at_rococo_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![],
+	);
+	// Configure destination Penpal chain to trust its sibling AH as reserve of bridged WND
+	PenpalA::execute_with(|| {
+		assert_ok!(<PenpalA as Chain>::System::set_storage(
+			<PenpalA as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				wnd_at_rococo_parachains.encode(),
+			)],
+		));
+	});
+	create_pool_with_native_on!(PenpalA, wnd_at_rococo_parachains.clone(), true, asset_owner);
+
+	let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(ROCOCO_GENESIS_HASH),
+		AssetHubRococo::para_id(),
+	);
+	let wnds_in_reserve_on_ahw_before =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+	let sender_wnds_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(wnd_at_westend_parachains.clone(), &sender)
+	});
+	let receiver_wnds_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Send WNDs over bridge
+	{
+		let destination = asset_hub_rococo_location();
+		let assets: Assets = (wnd_at_westend_parachains.clone(), amount).into();
+		let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into());
+		let fees_id: AssetId = wnd_at_westend_parachains.clone().into();
+		let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into());
+		let remote_fees = (bridged_wnd_at_ah_rococo(), amount / 2).into();
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		let custom_xcm_on_penpal_dest = Xcm::<()>(vec![
+			BuyExecution { fees: remote_fees, weight_limit: Unlimited },
+			DepositAsset { assets: Wild(AllCounted(assets.len() as u32)), beneficiary },
+		]);
+		let pp_loc_from_ah = AssetHubRococo::sibling_location_of(PenpalA::para_id());
+		let custom_xcm_on_remote_ah = Xcm::<()>(vec![
+			// BuyExecution { fees: remote_fees, weight_limit: Unlimited },
+			DepositReserveAsset {
+				assets: Wild(AllCounted(1)),
+				dest: pp_loc_from_ah,
+				xcm: custom_xcm_on_penpal_dest,
+			},
+		]);
+		send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah(
+			destination,
+			(assets, asset_transfer_type),
+			(fees_id, fees_transfer_type),
+			custom_xcm_on_remote_ah,
+		);
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// issue WNDs on AHR
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_wnds_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(wnd_at_westend_parachains, &sender)
+	});
+	let receiver_wnds_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains, &receiver)
+	});
+	let wnds_in_reserve_on_ahw_after =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+
+	// Sender's balance is reduced
+	assert!(sender_wnds_after < sender_wnds_before);
+	// Receiver's balance is increased
+	assert!(receiver_wnds_after > receiver_wnds_before);
+	// Reserve balance is increased by sent amount (less fess)
+	assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before);
+	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
+}
+
+#[test]
+fn send_wnds_from_westend_relay_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo() {
+	let amount = WESTEND_ED * 1_000;
+	let sender = WestendSender::get();
+	let receiver = PenpalAReceiver::get();
+	let local_asset_hub = Westend::child_location_of(AssetHubWestend::para_id());
+
+	let wnd_at_westend_parachains = wnd_at_ah_westend();
+	let wnd_at_rococo_parachains = bridged_wnd_at_ah_rococo();
+	// create foreign WND on AH Rococo
+	create_foreign_on_ah_rococo(wnd_at_rococo_parachains.clone(), true);
+	create_pool_with_native_on!(
+		AssetHubRococo,
+		wnd_at_rococo_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	// create foreign WND on Penpal Rococo
+	let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE);
+	PenpalA::force_create_foreign_asset(
+		wnd_at_rococo_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![],
+	);
+	// Configure destination Penpal chain to trust its sibling AH as reserve of bridged WND
+	PenpalA::execute_with(|| {
+		assert_ok!(<PenpalA as Chain>::System::set_storage(
+			<PenpalA as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				wnd_at_rococo_parachains.encode(),
+			)],
+		));
+	});
+	create_pool_with_native_on!(PenpalA, wnd_at_rococo_parachains.clone(), true, asset_owner);
+
+	Westend::execute_with(|| {
+		let root_origin = <Westend as Chain>::RuntimeOrigin::root();
+		<Westend as WestendPallet>::XcmPallet::force_xcm_version(
+			root_origin,
+			bx!(local_asset_hub.clone()),
+			XCM_VERSION,
+		)
+	})
+	.unwrap();
+	AssetHubRococo::force_xcm_version(
+		AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+		XCM_VERSION,
+	);
+
+	let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(ROCOCO_GENESIS_HASH),
+		AssetHubRococo::para_id(),
+	);
+	let wnds_in_reserve_on_ahw_before =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+	let sender_wnds_before = <Westend as Chain>::account_data_of(sender.clone()).free;
+	let receiver_wnds_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Send WNDs from Westend to AHW over bridge to AHR then onto Penpal parachain
+	{
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Westend Relay
+		let kusama_xcm = Xcm::<()>(vec![
+			WithdrawAsset((Location::here(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateTeleport {
+				assets: Wild(AllCounted(1)),
+				dest: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (wnd_at_westend_parachains, amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					DepositReserveAsset {
+						assets: Wild(AllCounted(1)),
+						dest: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (wnd_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							DepositReserveAsset {
+								assets: Wild(AllCounted(1)),
+								dest: AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+								// executes on Rococo Penpal
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (wnd_at_rococo_parachains.clone(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(Westend::execute_with(|| {
+				Dmp::<<Westend as Chain>::Runtime>::make_parachain_reachable(
+					AssetHubWestend::para_id(),
+				);
+				let signed_origin = <Westend as Chain>::RuntimeOrigin::signed(WestendSender::get());
+				<Westend as WestendPallet>::XcmPallet::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(kusama_xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount deposited in AHR's sovereign account
+						RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => {
+							who: *who == sov_ahr_on_ahw.clone().into(),
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// issue WNDs on AHR
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_wnds_after = <Westend as Chain>::account_data_of(sender.clone()).free;
+	let receiver_wnds_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(wnd_at_rococo_parachains, &receiver)
+	});
+	let wnds_in_reserve_on_ahw_after =
+		<AssetHubWestend as Chain>::account_data_of(sov_ahr_on_ahw.clone()).free;
+
+	// Sender's balance is reduced
+	assert!(sender_wnds_after < sender_wnds_before);
+	// Receiver's balance is increased
+	assert!(receiver_wnds_after > receiver_wnds_before);
+	// Reserve balance is increased by sent amount (less fess)
+	assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before);
+	assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount);
+}
+
 #[test]
 fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() {
 	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
@@ -429,8 +725,8 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 
 	// set up ROCs for transfer
 	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
-	let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location);
-	let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)];
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw, amount * 2)];
 	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
 	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
 	PenpalB::force_create_foreign_asset(
@@ -543,6 +839,372 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
 }
 
+#[test]
+fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_penpal_rococo(
+) {
+	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
+	let roc_at_rococo_parachains = Location::parent();
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = PenpalAReceiver::get();
+
+	// set up ROCs for transfer
+	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw.clone(), amount * 2)];
+	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
+	create_pool_with_native_on!(
+		AssetHubWestend,
+		roc_at_westend_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+	// Fund WNDs on Westend Penpal
+	PenpalB::mint_foreign_asset(
+		<PenpalB as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()),
+		Location::parent(),
+		sender.clone(),
+		amount,
+	);
+	// Create and fund bridged ROCs on Westend Penpal
+	PenpalB::force_create_foreign_asset(
+		roc_at_westend_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![(sender.clone(), amount * 2)],
+	);
+	// Configure source Penpal chain to trust local AH as reserve of bridged ROC
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as Chain>::System::set_storage(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				roc_at_westend_parachains.encode(),
+			)],
+		));
+	});
+
+	// fund the AHW's SA on AHR with the ROC tokens held in reserve
+	let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(WESTEND_GENESIS_HASH),
+		AssetHubWestend::para_id(),
+	);
+	AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]);
+
+	// balances before
+	let sender_rocs_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.clone().into(), &sender)
+	});
+	let receiver_rocs_before = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(roc_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// send ROCs over the bridge, all fees paid with ROC along the way
+	{
+		let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Penpal Westend
+		let xcm = Xcm::<()>(vec![
+			WithdrawAsset((roc_at_westend_parachains.clone(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateReserveWithdraw {
+				assets: Wild(AllCounted(1)),
+				reserve: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (roc_at_westend_parachains.clone(), amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					InitiateReserveWithdraw {
+						assets: Wild(AllCounted(1)),
+						reserve: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							DepositReserveAsset {
+								assets: Wild(AllCounted(1)),
+								dest: AssetHubRococo::sibling_location_of(PenpalA::para_id()),
+								// executes on Rococo Penpal
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(PenpalB::execute_with(|| {
+				let signed_origin = <PenpalB as Chain>::RuntimeOrigin::signed(sender.clone());
+				<PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount to reserve transfer is withdrawn from Penpal's sovereign account
+						RuntimeEvent::ForeignAssets(
+							pallet_assets::Event::Burned { asset_id, owner, .. }
+						) => {
+							asset_id: asset_id == &roc_at_westend_parachains,
+							owner: owner == &sov_penpal_on_ahw,
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+						// message processed successfully
+						RuntimeEvent::MessageQueue(
+							pallet_message_queue::Event::Processed { success: true, .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// burn ROCs from AHW's SA on AHR
+				RuntimeEvent::Balances(
+					pallet_balances::Event::Burned { who, .. }
+				) => {
+					who: *who == sov_ahw_on_ahr.clone().into(),
+				},
+				// sent message to sibling Penpal
+				RuntimeEvent::XcmpQueue(
+					cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+				) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+	PenpalA::execute_with(|| {
+		PenpalA::assert_xcmp_queue_success(None);
+	});
+
+	let sender_rocs_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.into(), &sender)
+	});
+	let receiver_rocs_after = PenpalA::execute_with(|| {
+		type Assets = <PenpalA as PenpalAPallet>::ForeignAssets;
+		<Assets as Inspect<_>>::balance(roc_at_rococo_parachains.clone(), &receiver)
+	});
+
+	// Sender's balance is reduced by sent "amount"
+	assert_eq!(sender_rocs_after, sender_rocs_before - amount);
+	// Receiver's balance is increased by no more than "amount"
+	assert!(receiver_rocs_after > receiver_rocs_before);
+	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
+}
+
+#[test]
+fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo_to_rococo_relay(
+) {
+	let roc_at_westend_parachains = bridged_roc_at_ah_westend();
+	let roc_at_rococo_parachains = Location::parent();
+	let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
+	let sender = PenpalBSender::get();
+	let receiver = RococoReceiver::get();
+
+	// set up ROCs for transfer
+	let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
+	let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location);
+	let prefund_accounts = vec![(sov_penpal_on_ahw.clone(), amount * 2)];
+	create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts);
+	create_pool_with_native_on!(
+		AssetHubWestend,
+		roc_at_westend_parachains.clone(),
+		true,
+		AssetHubRococoSender::get()
+	);
+	let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE);
+	// Fund WNDs on Westend Penpal
+	PenpalB::mint_foreign_asset(
+		<PenpalB as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()),
+		Location::parent(),
+		sender.clone(),
+		amount,
+	);
+	// Create and fund bridged ROCs on Westend Penpal
+	PenpalB::force_create_foreign_asset(
+		roc_at_westend_parachains.clone(),
+		asset_owner.clone(),
+		true,
+		ASSET_MIN_BALANCE,
+		vec![(sender.clone(), amount * 2)],
+	);
+	// Configure source Penpal chain to trust local AH as reserve of bridged ROC
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as Chain>::System::set_storage(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				roc_at_westend_parachains.encode(),
+			)],
+		));
+	});
+
+	// fund the AHW's SA on AHR with the ROC tokens held in reserve
+	let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus(
+		ByGenesis(WESTEND_GENESIS_HASH),
+		AssetHubWestend::para_id(),
+	);
+	AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]);
+
+	// fund Rococo Relay check account so we can teleport back to it
+	Rococo::fund_accounts(vec![(<Rococo as RococoPallet>::XcmPallet::check_account(), amount)]);
+
+	// balances before
+	let sender_rocs_before = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.clone().into(), &sender)
+	});
+	let receiver_rocs_before = <Rococo as Chain>::account_data_of(receiver.clone()).free;
+
+	// send ROCs over the bridge, all fees paid with ROC along the way
+	{
+		let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id());
+		let beneficiary: Location =
+			AccountId32Junction { network: None, id: receiver.clone().into() }.into();
+		// executes on Penpal Westend
+		let xcm = Xcm::<()>(vec![
+			WithdrawAsset((roc_at_westend_parachains.clone(), amount).into()),
+			SetFeesMode { jit_withdraw: true },
+			InitiateReserveWithdraw {
+				assets: Wild(AllCounted(1)),
+				reserve: local_asset_hub,
+				// executes on Westend Asset Hub
+				xcm: Xcm::<()>(vec![
+					BuyExecution {
+						fees: (roc_at_westend_parachains.clone(), amount / 2).into(),
+						weight_limit: Unlimited,
+					},
+					InitiateReserveWithdraw {
+						assets: Wild(AllCounted(1)),
+						reserve: asset_hub_rococo_location(),
+						// executes on Rococo Asset Hub
+						xcm: Xcm::<()>(vec![
+							BuyExecution {
+								fees: (roc_at_rococo_parachains.clone(), amount / 2).into(),
+								weight_limit: Unlimited,
+							},
+							InitiateTeleport {
+								assets: Wild(AllCounted(1)),
+								dest: Location::parent(),
+								// executes on Rococo Relay
+								xcm: Xcm::<()>(vec![
+									BuyExecution {
+										fees: (Location::here(), amount / 2).into(),
+										weight_limit: Unlimited,
+									},
+									DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
+								]),
+							},
+						]),
+					},
+				]),
+			},
+		]);
+		send_assets_over_bridge(|| {
+			// send message over bridge
+			assert_ok!(PenpalB::execute_with(|| {
+				let signed_origin = <PenpalB as Chain>::RuntimeOrigin::signed(sender.clone());
+				<PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+					signed_origin,
+					bx!(xcm::VersionedXcm::V5(xcm.into())),
+					Weight::MAX,
+				)
+			}));
+			AssetHubWestend::execute_with(|| {
+				type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+				assert_expected_events!(
+					AssetHubWestend,
+					vec![
+						// Amount to reserve transfer is withdrawn from Penpal's sovereign account
+						RuntimeEvent::ForeignAssets(
+							pallet_assets::Event::Burned { asset_id, owner, .. }
+						) => {
+							asset_id: asset_id == &roc_at_westend_parachains,
+							owner: owner == &sov_penpal_on_ahw,
+						},
+						RuntimeEvent::XcmpQueue(
+							cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }
+						) => {},
+						// message processed successfully
+						RuntimeEvent::MessageQueue(
+							pallet_message_queue::Event::Processed { success: true, .. }
+						) => {},
+					]
+				);
+			});
+		});
+	}
+
+	// process AHR incoming message and check events
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				// burn ROCs from AHW's SA on AHR
+				RuntimeEvent::Balances(
+					pallet_balances::Event::Burned { who, .. }
+				) => {
+					who: *who == sov_ahw_on_ahr.clone().into(),
+				},
+				// sent message to Rococo Relay
+				RuntimeEvent::ParachainSystem(
+					cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }
+				) => {},
+				// message processed successfully
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	});
+
+	let sender_rocs_after = PenpalB::execute_with(|| {
+		type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
+		<ForeignAssets as Inspect<_>>::balance(roc_at_westend_parachains.into(), &sender)
+	});
+	let receiver_rocs_after = <Rococo as Chain>::account_data_of(receiver.clone()).free;
+
+	// Sender's balance is reduced by sent "amount"
+	assert_eq!(sender_rocs_after, sender_rocs_before - amount);
+	// Receiver's balance is increased by no more than "amount"
+	assert!(receiver_rocs_after > receiver_rocs_before);
+	assert!(receiver_rocs_after <= receiver_rocs_before + amount);
+}
+
 #[test]
 fn dry_run_transfer_to_rococo_sends_xcm_to_bridge_hub() {
 	test_dry_run_transfer_across_pk_bridge!(
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 6e27aaf88b4bc7d86eab75230f6dcb4651d9a2a7..15c1a822b756c211508cc961bed835d73366dce8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -832,6 +832,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1158,6 +1159,10 @@ pub type Migrations = (
 		Runtime,
 		TrustBackedAssetsInstance,
 	>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 45b67d5499401e68a055336d863b1b98d6658b85..016f46d3c556a6a2dd1a545e10bf3092af3a039a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -885,6 +885,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1256,6 +1257,10 @@ pub type Migrations = (
 		Runtime,
 		TrustBackedAssetsInstance,
 	>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index c940af694b220ad31ad198cd59ee650291b55dd9..140056e724a79836c3a20594735bcc356fdd2698 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -182,6 +182,10 @@ pub type Migrations = (
 		RocksDbWeight,
 	>,
 	pallet_bridge_relayers::migration::v1::MigrationToV1<Runtime, ()>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -494,6 +498,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 01f72fd055a257ae52426bc7ab375fc646a1db8f..83712df8295b78c1c3f9de628c50c8ddc33be698 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -169,6 +169,10 @@ pub type Migrations = (
 		ConstU32<ASSET_HUB_ID>,
 	>,
 	bridge_to_ethereum_config::migrations::MigrationForXcmV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -475,6 +479,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index c662cd355c73b68740837dd59042dd849be5982c..65f3e27ae9a711ce8f39f7a9b5adad9b802d12b4 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -502,6 +502,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -772,6 +773,10 @@ type Migrations = (
 	// unreleased
 	pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, AmbassadorCoreInstance>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 48c5859715f4bbf7079ec55547dca6b67be67f22..165e60361cd187f36f144b37ca71f2e4e3397762 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -116,6 +116,10 @@ pub type Migrations = (
 	// unreleased
 	cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>,
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -362,6 +366,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index ccf8003639acddbe78d945a11fef7e086d4f4d92..b477bb4850d1c60e1550c915bee675da480d1e21 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -127,6 +127,10 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV1ToV2<Runtime>,
 	pallet_broker::migration::MigrateV2ToV3<Runtime>,
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -397,6 +401,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 3d544aea469f3844a58db2bf0857f8945cb6351e..964351575de838e42668c64a6733d4e59bbea6c9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -127,6 +127,10 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV1ToV2<Runtime>,
 	pallet_broker::migration::MigrateV2ToV3<Runtime>,
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -398,6 +402,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index 68c51175415c507b3449314713c9324c83709d4e..3766626ba4f632cd8603b51583f41a2782b207bd 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -114,6 +114,10 @@ pub type UncheckedExtrinsic =
 pub type Migrations = (
 	pallet_collator_selection::migration::v2::MigrationToV2<Runtime>,
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -359,6 +363,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index 980fb8db473204cd1a41adc55310f59b5dc1529a..34ab85f3d0cdd81d593d24a96ac4a3bbb4c37144 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -113,6 +113,10 @@ pub type UncheckedExtrinsic =
 /// Migrations to apply on runtime upgrade.
 pub type Migrations = (
 	pallet_collator_selection::migration::v2::MigrationToV2<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
@@ -358,6 +362,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index ed6e014417d457ba0b51819427ac94db557574e0..dfccf3ec9860db1c50d88ca78986010f0c805a39 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -162,6 +162,10 @@ pub type UncheckedExtrinsic =
 pub type Migrations = (
 	pallet_balances::migration::MigrateToTrackInactive<Runtime, xcm_config::CheckingAccount>,
 	pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,
+	pallet_session::migrations::v1::MigrateV0ToV1<
+		Runtime,
+		pallet_session::migrations::v1::InitOffenceSeverity<Runtime>,
+	>,
 );
 
 /// Executive: handles dispatch to the various modules.
@@ -730,6 +734,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/polkadot/runtime/common/src/claims/mod.rs b/polkadot/runtime/common/src/claims/mod.rs
index f48e40ee188789f91733cab5f6486fcc94760461..9e084688b4e558ec3917e6651e2a15204adaa4cd 100644
--- a/polkadot/runtime/common/src/claims/mod.rs
+++ b/polkadot/runtime/common/src/claims/mod.rs
@@ -130,7 +130,7 @@ impl Default for StatementKind {
 #[derive(
 	Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen,
 )]
-pub struct EthereumAddress([u8; 20]);
+pub struct EthereumAddress(pub [u8; 20]);
 
 impl Serialize for EthereumAddress {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -239,11 +239,11 @@ pub mod pallet {
 
 	/// The statement kind that must be signed, if any.
 	#[pallet::storage]
-	pub(super) type Signing<T> = StorageMap<_, Identity, EthereumAddress, StatementKind>;
+	pub type Signing<T> = StorageMap<_, Identity, EthereumAddress, StatementKind>;
 
 	/// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to.
 	#[pallet::storage]
-	pub(super) type Preclaims<T: Config> = StorageMap<_, Identity, T::AccountId, EthereumAddress>;
+	pub type Preclaims<T: Config> = StorageMap<_, Identity, T::AccountId, EthereumAddress>;
 
 	#[pallet::genesis_config]
 	#[derive(DefaultNoBound)]
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 054ec2aa4a931ff89d29d1f21112eb38f22edd61..61403c001e21013c1df3c37305015f29b2b8e32a 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -482,6 +482,7 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, ValidatorManager>;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -1758,6 +1759,9 @@ pub mod migrations {
         parachains_configuration::migration::v12::MigrateToV12<Runtime>,
         parachains_on_demand::migration::MigrateV0ToV1<Runtime>,
 
+		// migrates session storage item
+		pallet_session::migrations::v1::MigrateV0ToV1<Runtime, pallet_session::migrations::v1::InitOffenceSeverity<Runtime>>,
+
         // permanent
         pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
         parachains_inclusion::migration::MigrateToV1<Runtime>,
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index c0985873532021b066afe587f60a32c2b82dcdd2..fc489e3bc685e1dd4666fa5d702cfee3313084e5 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -318,12 +318,13 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = Staking;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 pallet_staking_reward_curve::build! {
@@ -401,7 +402,6 @@ impl pallet_staking::Config for Runtime {
 	type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig;
 	type EventListeners = ();
 	type WeightInfo = ();
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxValidatorSet = MaxAuthorities;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 5d7a8f5162546c57549c52370d0d91b551a949f9..8ee9e073f162d56b623b12fb390d45b929034abe 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -532,6 +532,7 @@ impl pallet_session::Config for Runtime {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
 	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
 }
 
@@ -769,7 +770,6 @@ impl pallet_staking::Config for Runtime {
 	type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig;
 	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = weights::pallet_staking::WeightInfo<Runtime>;
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxInvulnerables = frame_support::traits::ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
@@ -1871,6 +1871,11 @@ pub mod migrations {
 		parachains_shared::migration::MigrateToV1<Runtime>,
 		parachains_scheduler::migration::MigrateV2ToV3<Runtime>,
 		pallet_staking::migrations::v16::MigrateV15ToV16<Runtime>,
+		pallet_staking::migrations::v17::MigrateV16ToV17<Runtime>,
+		pallet_session::migrations::v1::MigrateV0ToV1<
+			Runtime,
+			pallet_staking::migrations::v17::MigrateDisabledToSession<Runtime>,
+		>,
 		// permanent
 		pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
 	);
diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs
index f0491a1daf6c372bb34712572fc1fca3a9c0795d..add70e85fb49b02fa788e0ee6865d1c704326bcd 100644
--- a/polkadot/runtime/westend/src/weights/pallet_staking.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs
@@ -805,4 +805,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
index fc650ae55a785b08ab609feb07337d099231904c..38530fd3f5aa5c7346b4158593ffd72df1e13b39 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
@@ -160,13 +160,13 @@ pub mod mock_msg_queue {
 		type XcmExecutor: ExecuteXcm<Self::RuntimeCall>;
 	}
 
-	#[pallet::call]
-	impl<T: Config> Pallet<T> {}
-
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
 	pub struct Pallet<T>(_);
 
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {}
+
 	#[pallet::storage]
 	#[pallet::getter(fn parachain_id)]
 	pub(super) type ParachainId<T: Config> = StorageValue<_, ParaId, ValueQuery>;
diff --git a/prdoc/pr_6440.prdoc b/prdoc/pr_6440.prdoc
index 376e59fa752eb9870e7d6078caf8ee4310a2f0c8..406050bbf6a50a869e2d6b0daebd7a86ff5842dd 100644
--- a/prdoc/pr_6440.prdoc
+++ b/prdoc/pr_6440.prdoc
@@ -6,3 +6,4 @@ doc:
 crates:
 - name: polkadot-node-core-pvf
   validate: false
+  bump: none
diff --git a/prdoc/pr_6455.prdoc b/prdoc/pr_6455.prdoc
index 9a83048e2fd292cdd71e3504bbfd4f94a4e3dfa3..d998473133569f87b1a7d4c80226863416ccfbb0 100644
--- a/prdoc/pr_6455.prdoc
+++ b/prdoc/pr_6455.prdoc
@@ -6,3 +6,4 @@ doc:
 crates:
 - name: sc-network
   validate: false
+  bump: none
diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc
deleted file mode 100644
index 61a64c72418576b6e35b7ae235e2690fa28b8ccf..0000000000000000000000000000000000000000
--- a/prdoc/pr_6549.prdoc
+++ /dev/null
@@ -1,247 +0,0 @@
-doc: []
-
-crates:
-  - name: polkadot-sdk
-    bump: none
-  - name: asset-test-utils
-    bump: none
-  - name: cumulus-pallet-parachain-system
-    bump: none
-  - name: cumulus-pallet-parachain-system-proc-macro
-    bump: none
-  - name: cumulus-primitives-core
-    bump: none
-  - name: polkadot-core-primitives
-    bump: none
-  - name: polkadot-parachain-primitives
-    bump: none
-  - name: polkadot-primitives
-    bump: none
-  - name: staging-xcm
-    bump: none
-  - name: xcm-procedural
-    bump: none
-  - name: cumulus-primitives-parachain-inherent
-    bump: none
-  - name: cumulus-primitives-proof-size-hostfunction
-    bump: none
-  - name: polkadot-runtime-common
-    bump: none
-  - name: polkadot-runtime-parachains
-    bump: none
-  - name: polkadot-runtime-metrics
-    bump: none
-  - name: staging-xcm-executor
-    bump: none
-  - name: slot-range-helper
-    bump: none
-  - name: staging-xcm-builder
-    bump: none
-  - name: pallet-xcm
-    bump: none
-  - name: cumulus-primitives-storage-weight-reclaim
-    bump: none
-  - name: cumulus-pallet-aura-ext
-    bump: none
-  - name: cumulus-primitives-aura
-    bump: none
-  - name: staging-parachain-info
-    bump: none
-  - name: cumulus-test-relay-sproof-builder
-    bump: none
-  - name: cumulus-client-cli
-    bump: none
-  - name: cumulus-client-collator
-    bump: none
-  - name: cumulus-client-consensus-common
-    bump: none
-  - name: cumulus-client-pov-recovery
-    bump: none
-  - name: cumulus-relay-chain-interface
-    bump: none
-  - name: polkadot-overseer
-    bump: none
-  - name: tracing-gum
-    bump: none
-  - name: tracing-gum-proc-macro
-    bump: none
-  - name: polkadot-node-metrics
-    bump: none
-  - name: polkadot-node-primitives
-    bump: none
-  - name: polkadot-erasure-coding
-    bump: none
-  - name: polkadot-node-subsystem
-    bump: none
-  - name: polkadot-node-subsystem-types
-    bump: none
-  - name: polkadot-node-network-protocol
-    bump: none
-  - name: polkadot-statement-table
-    bump: none
-  - name: polkadot-rpc
-    bump: none
-  - name: polkadot-service
-    bump: none
-  - name: cumulus-client-parachain-inherent
-    bump: none
-  - name: westend-runtime
-    bump: none
-  - name: pallet-xcm-benchmarks
-    bump: none
-  - name: westend-runtime-constants
-    bump: none
-  - name: polkadot-approval-distribution
-    bump: none
-  - name: polkadot-node-subsystem-util
-    bump: none
-  - name: polkadot-availability-bitfield-distribution
-    bump: none
-  - name: polkadot-availability-distribution
-    bump: none
-  - name: polkadot-availability-recovery
-    bump: none
-  - name: polkadot-node-core-approval-voting
-    bump: none
-  - name: polkadot-node-core-approval-voting-parallel
-    bump: none
-  - name: polkadot-node-core-av-store
-    bump: none
-  - name: polkadot-node-core-chain-api
-    bump: none
-  - name: polkadot-statement-distribution
-    bump: none
-  - name: polkadot-collator-protocol
-    bump: none
-  - name: polkadot-dispute-distribution
-    bump: none
-  - name: polkadot-gossip-support
-    bump: none
-  - name: polkadot-network-bridge
-    bump: none
-  - name: polkadot-node-collation-generation
-    bump: none
-  - name: polkadot-node-core-backing
-    bump: none
-  - name: polkadot-node-core-bitfield-signing
-    bump: none
-  - name: polkadot-node-core-candidate-validation
-    bump: none
-  - name: polkadot-node-core-pvf
-    bump: none
-  - name: polkadot-node-core-pvf-common
-    bump: none
-  - name: polkadot-node-core-pvf-execute-worker
-    bump: none
-  - name: polkadot-node-core-pvf-prepare-worker
-    bump: none
-  - name: staging-tracking-allocator
-    bump: none
-  - name: rococo-runtime
-    bump: none
-  - name: rococo-runtime-constants
-    bump: none
-  - name: polkadot-node-core-chain-selection
-    bump: none
-  - name: polkadot-node-core-dispute-coordinator
-    bump: none
-  - name: polkadot-node-core-parachains-inherent
-    bump: none
-  - name: polkadot-node-core-prospective-parachains
-    bump: none
-  - name: polkadot-node-core-provisioner
-    bump: none
-  - name: polkadot-node-core-pvf-checker
-    bump: none
-  - name: polkadot-node-core-runtime-api
-    bump: none
-  - name: cumulus-client-network
-    bump: none
-  - name: cumulus-relay-chain-inprocess-interface
-    bump: none
-  - name: polkadot-cli
-    bump: none
-  - name: cumulus-client-consensus-aura
-    bump: none
-  - name: cumulus-client-consensus-proposer
-    bump: none
-  - name: cumulus-client-consensus-relay-chain
-    bump: none
-  - name: cumulus-client-service
-    bump: none
-  - name: cumulus-relay-chain-minimal-node
-    bump: none
-  - name: cumulus-relay-chain-rpc-interface
-    bump: none
-  - name: parachains-common
-    bump: none
-  - name: cumulus-primitives-utility
-    bump: none
-  - name: cumulus-pallet-xcmp-queue
-    bump: none
-  - name: parachains-runtimes-test-utils
-    bump: none
-  - name: assets-common
-    bump: none
-  - name: bridge-hub-common
-    bump: none
-  - name: bridge-hub-test-utils
-    bump: none
-  - name: cumulus-pallet-solo-to-para
-    bump: none
-  - name: cumulus-pallet-xcm
-    bump: none
-  - name: cumulus-ping
-    bump: none
-  - name: cumulus-primitives-timestamp
-    bump: none
-  - name: emulated-integration-tests-common
-    bump: none
-  - name: xcm-emulator
-    bump: none
-  - name: pallet-collective-content
-    bump: none
-  - name: xcm-simulator
-    bump: none
-  - name: pallet-revive-fixtures
-    bump: none
-  - name: polkadot-omni-node-lib
-    bump: none
-  - name: snowbridge-runtime-test-common
-    bump: none
-  - name: testnet-parachains-constants
-    bump: none
-  - name: asset-hub-rococo-runtime
-    bump: none
-  - name: asset-hub-westend-runtime
-    bump: none
-  - name: bridge-hub-rococo-runtime
-    bump: none
-  - name: bridge-hub-westend-runtime
-    bump: none
-  - name: collectives-westend-runtime
-    bump: none
-  - name: coretime-rococo-runtime
-    bump: none
-  - name: coretime-westend-runtime
-    bump: none
-  - name: people-rococo-runtime
-    bump: none
-  - name: people-westend-runtime
-    bump: none
-  - name: contracts-rococo-runtime
-    bump: none
-  - name: glutton-westend-runtime
-    bump: none
-  - name: rococo-parachain-runtime
-    bump: none
-  - name: polkadot-omni-node
-    bump: none
-  - name: polkadot-parachain-bin
-    bump: none
-  - name: polkadot
-    bump: none
-  - name: polkadot-voter-bags
-    bump: none
-  - name: xcm-simulator-example
-    bump: none
diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc
index 1db5fd54d97168675802b570685a0c92610ccb8c..159685f5a5ce49bd22c94bb89f1b6db59cf9ff01 100644
--- a/prdoc/pr_6636.prdoc
+++ b/prdoc/pr_6636.prdoc
@@ -7,3 +7,4 @@ doc:
 crates:
 - name: sc-network
   validate: false
+  bump: none
diff --git a/prdoc/pr_6988.prdoc b/prdoc/pr_6988.prdoc
deleted file mode 100644
index 18f70f9fd97f1f316bec59a8072e89a8acec1c8b..0000000000000000000000000000000000000000
--- a/prdoc/pr_6988.prdoc
+++ /dev/null
@@ -1,5 +0,0 @@
-doc: []
-
-crates:
-  - name: polkadot
-    bump: none
\ No newline at end of file
diff --git a/prdoc/pr_7014.prdoc b/prdoc/pr_7014.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e4e0214480a3ed420d402d21395f5e9eb50505f9
--- /dev/null
+++ b/prdoc/pr_7014.prdoc
@@ -0,0 +1,24 @@
+title: Remove `yamux_window_size` from network config
+doc:
+- audience: Node Dev
+  description: |-
+    # Description
+
+    resolve #6468
+
+
+
+    # Checklist
+
+    * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above.
+    * [ ] My PR follows the [labeling requirements](
+    https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process
+    ) of this project (at minimum one label for `T` required)
+        * External contributors: ask maintainers to put the right label on your PR.
+    * [ ] I have made corresponding changes to the documentation (if applicable)
+    * [ ] I have added tests that prove my fix is effective or that my feature works (if applicable)
+crates:
+- name: sc-cli
+  bump: major
+- name: sc-network
+  bump: major
diff --git a/prdoc/pr_7424.prdoc b/prdoc/pr_7424.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e177f41371bc6492ad1aeb239e120b5b556df5d7
--- /dev/null
+++ b/prdoc/pr_7424.prdoc
@@ -0,0 +1,37 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: 'Bounded Slashing: Paginated Offence Processing & Slash Application'
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR refactors the slashing mechanism in `pallet-staking` to be bounded by introducing paged offence processing and paged slash application.
+
+            ### Key Changes
+            - Offences are queued instead of being processed immediately.
+            - Slashes are computed in pages, stored as a `StorageDoubleMap` with `(Validator, SlashFraction, PageIndex)` to uniquely identify them.
+            - Slashes are applied incrementally across multiple blocks instead of a single unbounded operation.
+            - New storage items: `OffenceQueue`, `ProcessingOffence`, `OffenceQueueEras`.
+            - Updated API for cancelling and applying slashes.
+            - Preliminary benchmarks added; further optimizations planned.
+
+            This enables staking slashing to scale efficiently and removes a major blocker for staking migration to a parachain (AH).
+
+crates:
+- name: pallet-babe
+  bump: patch
+- name: pallet-staking
+  bump: major
+- name: pallet-grandpa
+  bump: patch
+- name: westend-runtime
+  bump: minor
+- name: pallet-beefy
+  bump: patch
+- name: pallet-offences-benchmarking
+  bump: patch
+- name: pallet-session-benchmarking
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
\ No newline at end of file
diff --git a/prdoc/pr_7494.prdoc b/prdoc/pr_7494.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b0e1ec0266552e44b74ccadd9fd4b8f375751036
--- /dev/null
+++ b/prdoc/pr_7494.prdoc
@@ -0,0 +1,18 @@
+title: Enhance libp2p logging targets for granular control
+
+doc:
+  - audience: [Node Dev, Node Operator]
+    description: |
+      This PR modifies the libp2p networking-specific log targets for granular control (e.g., just enabling trace for req-resp).
+
+      Previously, all logs were outputted to `sub-libp2p` target, flooding the log messages on busy validators.
+        - Discovery: `sub-libp2p::discovery`;
+        - Notification/behaviour: `sub-libp2p::notification::behaviour`;
+        - Notification/handler: `sub-libp2p::notification::handler`;
+        - Notification/service: `sub-libp2p::notification::service`;
+        - Notification/upgrade: `sub-libp2p::notification::upgrade`;
+        - Request response: `sub-libp2p::request-response`.
+
+crates:
+  - name: sc-network
+    bump: patch
diff --git a/prdoc/pr_7579.prdoc b/prdoc/pr_7579.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..3f7cbda6492935f3c92cffbbbead8575b172831f
--- /dev/null
+++ b/prdoc/pr_7579.prdoc
@@ -0,0 +1,57 @@
+title: '[AHM] Make pallet types public'
+doc:
+- audience: Runtime Dev
+  description: Preparation for AHM and making stuff public.
+crates:
+- name: cumulus-pallet-dmp-queue
+  bump: minor
+- name: cumulus-pallet-xcm
+  bump: minor
+- name: polkadot-runtime-common
+  bump: minor
+- name: polkadot-runtime-parachains
+  bump: minor
+- name: pallet-bags-list
+  bump: minor
+- name: pallet-conviction-voting
+  bump: minor
+- name: pallet-fast-unstake
+  bump: minor
+- name: pallet-multisig
+  bump: minor
+- name: pallet-nomination-pools
+  bump: minor
+- name: pallet-preimage
+  bump: minor
+- name: pallet-scheduler
+  bump: minor
+- name: pallet-vesting
+  bump: minor
+- name: staging-parachain-info
+  bump: minor
+- name: xcm-simulator
+  bump: minor
+- name: pallet-asset-conversion
+  bump: minor
+- name: pallet-assets-freezer
+  bump: minor
+- name: pallet-assets
+  bump: minor
+- name: pallet-authority-discovery
+  bump: minor
+- name: pallet-core-fellowship
+  bump: minor
+- name: pallet-delegated-staking
+  bump: minor
+- name: pallet-example-view-functions
+  bump: minor
+- name: pallet-salary
+  bump: minor
+- name: pallet-society
+  bump: minor
+- name: frame-support
+  bump: minor
+- name: pallet-treasury
+  bump: minor
+- name: pallet-uniques
+  bump: minor
diff --git a/prdoc/pr_7580.prdoc b/prdoc/pr_7580.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ba041355506e485d3b1543b8918320d07a7e9f6b
--- /dev/null
+++ b/prdoc/pr_7580.prdoc
@@ -0,0 +1,10 @@
+title: implement web3_clientVersion
+doc:
+- audience: Runtime Dev
+  description: |-
+    Implements the `web3_clientVersion`  method. This is a common requirement for external Ethereum libraries when querying a client.
+
+    Reference issue with more details: https://github.com/paritytech/contract-issues/issues/26.
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/prdoc/pr_7581.prdoc b/prdoc/pr_7581.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..04ee5fbbff230198012ce8f2750c826a96286e9f
--- /dev/null
+++ b/prdoc/pr_7581.prdoc
@@ -0,0 +1,65 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Move validator disabling logic to pallet-session
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This decouples disabling logic from staking, and moves it to session. This ensures validators can be disabled
+      directly when staking transitions to the system parachain and offences are reported on RC, eliminating
+      cross-network hops.
+
+crates:
+- name: pallet-staking
+  bump: major
+- name: pallet-session
+  bump: major
+- name: pallet-authority-discovery
+  bump: patch
+- name: pallet-authority-discovery
+  bump: patch
+- name: pallet-babe
+  bump: patch
+- name: pallet-grandpa
+  bump: patch
+- name: westend-runtime
+  bump: minor
+- name: pallet-beefy
+  bump: patch
+- name: pallet-beefy-mmr
+  bump: patch
+- name: pallet-offences-benchmarking
+  bump: patch
+- name: pallet-im-online
+  bump: patch
+- name: pallet-session-benchmarking
+  bump: patch
+- name: rococo-runtime
+  bump: minor
+- name: pallet-collator-selection
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
+- name: asset-hub-rococo-runtime
+  bump: minor
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: bridge-hub-rococo-runtime
+  bump: minor
+- name: bridge-hub-westend-runtime
+  bump: minor
+- name: collectives-westend-runtime
+  bump: minor
+- name: coretime-rococo-runtime
+  bump: minor
+- name: coretime-westend-runtime
+  bump: minor
+- name: people-rococo-runtime
+  bump: minor
+- name: people-westend-runtime
+  bump: minor
+- name: penpal-runtime
+  bump: minor
+- name: contracts-rococo-runtime
+  bump: minor
diff --git a/prdoc/pr_7582.prdoc b/prdoc/pr_7582.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..26e594c4373f2e67db0ecbeb357c16c6913238a7
--- /dev/null
+++ b/prdoc/pr_7582.prdoc
@@ -0,0 +1,17 @@
+title: Implementation of `ah-client` and `rc-client` staking pallets
+doc:
+- audience: Runtime Dev
+  description: |-
+    This PR introduces the initial structure for `pallet-ah-client` and `pallet-rc-client`. These
+    pallets will reside on the relay chain and AssetHub, respectively, and will manage the interaction
+    between `pallet-session` on the relay chain and `pallet-staking` on AssetHub.
+    Both pallets are experimental and not intended for production use.
+crates:
+- name: pallet-staking-ah-client
+  bump: major
+- name: pallet-staking-rc-client
+  bump: major
+- name: pallet-election-provider-multi-block
+  bump: minor
+- name: pallet-staking
+  bump: major
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index bebf48618ba5c9c785a25091998627858c398dde..d90d103fdedc20a1eff3b839fe62b581bc11c5cb 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -676,8 +676,6 @@ impl_opaque_keys! {
 
 #[cfg(feature = "staking-playground")]
 pub mod staking_playground {
-	use pallet_staking::Exposure;
-
 	use super::*;
 
 	/// An adapter to make the chain work with --dev only, even though it is running a large staking
@@ -712,61 +710,43 @@ pub mod staking_playground {
 		}
 	}
 
-	impl pallet_session::historical::SessionManager<AccountId, Exposure<AccountId, Balance>>
-		for AliceAsOnlyValidator
-	{
+	impl pallet_session::historical::SessionManager<AccountId, ()> for AliceAsOnlyValidator {
 		fn end_session(end_index: sp_staking::SessionIndex) {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::end_session(end_index)
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::end_session(
+				end_index,
+			)
 		}
 
-		fn new_session(
-			new_index: sp_staking::SessionIndex,
-		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::new_session(new_index)
+		fn new_session(new_index: sp_staking::SessionIndex) -> Option<Vec<(AccountId, ())>> {
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::new_session(
+				new_index,
+			)
 			.map(|_ignored| {
 				// construct a fake exposure for alice.
-				vec![(
-					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
-					pallet_staking::Exposure {
-						total: 1_000_000_000,
-						own: 1_000_000_000,
-						others: vec![],
-					},
-				)]
+				vec![(sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), ())]
 			})
 		}
 
 		fn new_session_genesis(
 			new_index: sp_staking::SessionIndex,
-		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
+		) -> Option<Vec<(AccountId, ())>> {
 			<Staking as pallet_session::historical::SessionManager<
 				AccountId,
-				Exposure<AccountId, Balance>,
+				(),
 			>>::new_session_genesis(new_index)
 			.map(|_ignored| {
 				// construct a fake exposure for alice.
 				vec![(
 					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
-					pallet_staking::Exposure {
-						total: 1_000_000_000,
-						own: 1_000_000_000,
-						others: vec![],
-					},
+					(),
 				)]
 			})
 		}
 
 		fn start_session(start_index: sp_staking::SessionIndex) {
-			<Staking as pallet_session::historical::SessionManager<
-				AccountId,
-				Exposure<AccountId, Balance>,
-			>>::start_session(start_index)
+			<Staking as pallet_session::historical::SessionManager<AccountId, ()>>::start_session(
+				start_index,
+			)
 		}
 	}
 }
@@ -779,6 +759,8 @@ impl pallet_session::Config for Runtime {
 	type NextSessionRotation = Babe;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy;
+
 	type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
 	#[cfg(not(feature = "staking-playground"))]
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
@@ -790,8 +772,8 @@ impl pallet_session::Config for Runtime {
 }
 
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 pallet_staking_reward_curve::build! {
@@ -894,7 +876,6 @@ impl pallet_staking::Config for Runtime {
 	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 	type BenchmarkingConfig = StakingBenchmarkingConfig;
-	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs
index 748b84a50d2ae1cc60a8b5195e14e17615bc865a..c9be0b48d3443927a4267d53658994fc98f339db 100644
--- a/substrate/client/cli/src/params/network_params.rs
+++ b/substrate/client/cli/src/params/network_params.rs
@@ -275,7 +275,6 @@ impl NetworkParams {
 			allow_non_globals_in_dht,
 			kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths,
 			kademlia_replication_factor: self.kademlia_replication_factor,
-			yamux_window_size: None,
 			ipfs_server: self.ipfs_server,
 			sync_mode: self.sync.into(),
 			network_backend: self.network_backend.into(),
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index 327f79fe6c1306d3ca5f6912b480ea84ffcdb689..1a64f06e74c210f26994a90e54f28801f2a93295 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -650,27 +650,6 @@ pub struct NetworkConfiguration {
 	/// Enable serving block data over IPFS bitswap.
 	pub ipfs_server: bool,
 
-	/// Size of Yamux receive window of all substreams. `None` for the default (256kiB).
-	/// Any value less than 256kiB is invalid.
-	///
-	/// # Context
-	///
-	/// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes
-	/// to be transferred at a time, where `N` is the Yamux receive window size configurable here.
-	/// This means, in practice, that every `N` bytes must be acknowledged by the receiver before
-	/// the sender can send more data. The maximum bandwidth of each notifications substream is
-	/// therefore `N / round_trip_time`.
-	///
-	/// It is recommended to leave this to `None`, and use a request-response protocol instead if
-	/// a large amount of data must be transferred. The reason why the value is configurable is
-	/// that some Substrate users mis-use notification protocols to send large amounts of data.
-	/// As such, this option isn't designed to stay and will likely get removed in the future.
-	///
-	/// Note that configuring a value here isn't a modification of the Yamux protocol, but rather
-	/// a modification of the way the implementation works. Different nodes with different
-	/// configured values remain compatible with each other.
-	pub yamux_window_size: Option<u32>,
-
 	/// Networking backend used for P2P communication.
 	pub network_backend: NetworkBackendType,
 }
@@ -703,7 +682,6 @@ impl NetworkConfiguration {
 			kademlia_disjoint_query_paths: false,
 			kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR)
 				.expect("value is a constant; constant is non-zero; qed."),
-			yamux_window_size: None,
 			ipfs_server: false,
 			network_backend: NetworkBackendType::Libp2p,
 		}
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs
index 917449cf228c6ef8e8864699d5241f80158f25b7..6b958de86918f8bfa3e150c6aea150a13d1dc85c 100644
--- a/substrate/client/network/src/discovery.rs
+++ b/substrate/client/network/src/discovery.rs
@@ -84,6 +84,9 @@ use std::{
 	time::{Duration, Instant},
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::discovery";
+
 /// Maximum number of known external addresses that we will cache.
 /// This only affects whether we will log whenever we (re-)discover
 /// a given address.
@@ -262,7 +265,7 @@ impl DiscoveryConfig {
 				match TokioMdns::new(mdns::Config::default(), local_peer_id) {
 					Ok(mdns) => Toggle::from(Some(mdns)),
 					Err(err) => {
-						warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err);
+						warn!(target: LOG_TARGET, "Failed to initialize mDNS: {:?}", err);
 						Toggle::from(None)
 					},
 				}
@@ -375,7 +378,7 @@ impl DiscoveryBehaviour {
 		if let Some(kademlia) = self.kademlia.as_mut() {
 			if !self.allow_non_globals_in_dht && !Self::can_add_to_dht(&addr) {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Ignoring self-reported non-global address {} from {}.", addr, peer_id
 				);
 				return
@@ -393,7 +396,7 @@ impl DiscoveryBehaviour {
 					.expect("kademlia protocol was checked above to be enabled; qed")
 			}) {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Ignoring self-reported address {} from {} as remote node is not part of the \
 					 Kademlia DHT supported by the local node.", addr, peer_id,
 				);
@@ -401,7 +404,7 @@ impl DiscoveryBehaviour {
 			}
 
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Adding self-reported address {} from {} to Kademlia DHT.",
 				addr, peer_id
 			);
@@ -425,7 +428,7 @@ impl DiscoveryBehaviour {
 	pub fn put_value(&mut self, key: RecordKey, value: Vec<u8>) {
 		if let Some(k) = self.kademlia.as_mut() {
 			if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) {
-				warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e);
+				warn!(target: LOG_TARGET, "Libp2p => Failed to put record: {:?}", e);
 				self.pending_events
 					.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0)));
 			}
@@ -444,7 +447,7 @@ impl DiscoveryBehaviour {
 		if let Some(kad) = self.kademlia.as_mut() {
 			if update_local_storage {
 				if let Err(_e) = kad.store_mut().put(record.clone()) {
-					warn!(target: "sub-libp2p", "Failed to update local starage");
+					warn!(target: LOG_TARGET, "Failed to update local starage");
 				}
 			}
 
@@ -462,7 +465,7 @@ impl DiscoveryBehaviour {
 	pub fn start_providing(&mut self, key: RecordKey) {
 		if let Some(kad) = self.kademlia.as_mut() {
 			if let Err(e) = kad.start_providing(key.clone()) {
-				warn!(target: "sub-libp2p", "Libp2p => Failed to start providing {key:?}: {e}.");
+				warn!(target: LOG_TARGET, "Libp2p => Failed to start providing {key:?}: {e}.");
 				self.pending_events.push_back(DiscoveryOut::StartProvidingFailed(key));
 			}
 		}
@@ -498,7 +501,7 @@ impl DiscoveryBehaviour {
 				expires,
 			}) {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Failed to store record with key: {:?}",
 					err
 				);
@@ -712,7 +715,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 			});
 		}
 
-		trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list);
+		trace!(target: LOG_TARGET, "Addresses of {:?}: {:?}", peer_id, list);
 
 		Ok(list.into_iter().collect())
 	}
@@ -781,7 +784,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 				if let Some(Protocol::P2p(peer_id)) = addr.iter().last() {
 					if peer_id != self.local_peer_id {
 						warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"🔍 Discovered external address for a peer that is not us: {addr}",
 						);
 						// Ensure this address is not propagated to kademlia.
@@ -796,7 +799,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 					// in which case we just want to refrain from logging.
 					if self.known_external_addresses.insert(address.clone()) {
 						info!(
-						  target: "sub-libp2p",
+						  target: LOG_TARGET,
 						  "🔍 Discovered new external address for our node: {address}",
 						);
 					}
@@ -805,7 +808,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 				self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e));
 			},
 			event => {
-				debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}");
+				debug!(target: LOG_TARGET, "New unknown `FromSwarm` libp2p event: {event:?}");
 				self.kademlia.on_swarm_event(event);
 			},
 		}
@@ -834,7 +837,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						if self.num_connections < self.discovery_only_if_under_num {
 							let random_peer_id = PeerId::random();
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p <= Starting random Kademlia request for {:?}",
 								random_peer_id,
 							);
@@ -842,7 +845,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							true
 						} else {
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Kademlia paused due to high number of connections ({})",
 								self.num_connections
 							);
@@ -899,20 +902,20 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 					} => match res {
 						Err(GetClosestPeersError::Timeout { key, peers }) => {
 							debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Query for {:?} timed out with {} results",
 								HexDisplay::from(&key), peers.len(),
 							);
 						},
 						Ok(ok) => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Query for {:?} yielded {:?} results",
 								HexDisplay::from(&ok.key), ok.peers.len(),
 							);
 							if ok.peers.is_empty() && self.num_connections != 0 {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Random Kademlia query has yielded empty results",
 								);
 							}
@@ -927,7 +930,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						let ev = match res {
 							Ok(GetRecordOk::FoundRecord(r)) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Found record ({:?}) with value: {:?} id {:?} stats {:?}",
 									r.record.key,
 									r.record.value,
@@ -959,7 +962,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								cache_candidates,
 							}) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Finished with no-additional-record {:?} stats {:?} took {:?} ms",
 									id,
 									stats,
@@ -986,7 +989,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => {
 								trace!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get record: {:?}",
 									e,
 								);
@@ -997,7 +1000,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(e) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get record: {:?}",
 									e,
 								);
@@ -1018,7 +1021,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						let ev = match res {
 							Ok(GetProvidersOk::FoundProviders { key, providers }) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Found providers {:?} for key {:?}, id {:?}, stats {:?}",
 									providers,
 									key,
@@ -1036,7 +1039,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								closest_peers: _,
 							}) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Finished with no additional providers {:?}, stats {:?}, took {:?} ms",
 									id,
 									stats,
@@ -1047,7 +1050,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 							},
 							Err(GetProvidersError::Timeout { key, closest_peers: _ }) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to get providers for {key:?} due to timeout.",
 								);
 
@@ -1069,7 +1072,7 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 								DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()),
 							Err(e) => {
 								debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Libp2p => Failed to put record: {:?}",
 									e,
 								);
@@ -1086,12 +1089,12 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						..
 					} => match res {
 						Ok(ok) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => Record republished: {:?}",
 							ok.key,
 						),
 						Err(e) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => Republishing of record {:?} failed with: {:?}",
 							e.key(), e,
 						),
@@ -1101,20 +1104,20 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 						..
 					} => match res {
 						Ok(ok) => debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => DHT bootstrap progressed: {ok:?}",
 						),
 						Err(e) => warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Libp2p => DHT bootstrap error: {e:?}",
 						),
 					},
 					// We never start any other type of query.
 					KademliaEvent::OutboundQueryProgressed { result: e, .. } => {
-						warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e)
+						warn!(target: LOG_TARGET, "Libp2p => Unhandled Kademlia event: {:?}", e)
 					},
 					Event::ModeChanged { new_mode } => {
-						debug!(target: "sub-libp2p", "Libp2p => Kademlia mode changed: {new_mode}")
+						debug!(target: LOG_TARGET, "Libp2p => Kademlia mode changed: {new_mode}")
 					},
 				},
 				ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }),
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
index e8e132228ca8f82d1a32ff2508b5fe84efc242ca..4f105936ac5632d40a648a2fe11857dc3d110c48 100644
--- a/substrate/client/network/src/litep2p/mod.rs
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -321,10 +321,6 @@ impl Litep2pNetworkBackend {
 			yamux_config.set_window_update_mode(litep2p::yamux::WindowUpdateMode::OnRead);
 			yamux_config.set_max_buffer_size(yamux_maximum_buffer_size);
 
-			if let Some(yamux_window_size) = config.network_config.yamux_window_size {
-				yamux_config.set_receive_window(yamux_window_size);
-			}
-
 			yamux_config
 		};
 
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index e6909fcdefeaf874b5014dc24f41174182767b81..217ef304bd0fc6c96fffc33fdb08b8ae38a1c47d 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -60,13 +60,13 @@ use std::{
 	time::{Duration, Instant},
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::behaviour";
+
 /// Type representing a pending substream validation.
 type PendingInboundValidation =
 	BoxFuture<'static, (Result<ValidationResult, RecvError>, IncomingIndex)>;
 
-/// Logging target for the file.
-const LOG_TARGET: &str = "sub-libp2p";
-
 /// Network behaviour that handles opening substreams for custom protocols with other peers.
 ///
 /// # How it works
@@ -470,7 +470,7 @@ impl Notifications {
 		if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) {
 			*p.handshake.write() = handshake_message.into();
 		} else {
-			log::error!(target: "sub-libp2p", "Unknown handshake change set: {:?}", set_id);
+			log::error!(target: LOG_TARGET, "Unknown handshake change set: {:?}", set_id);
 			debug_assert!(false);
 		}
 	}
@@ -487,7 +487,7 @@ impl Notifications {
 
 	/// Disconnects the given peer if we are connected to it.
 	pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: SetId) {
-		trace!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id);
+		trace!(target: LOG_TARGET, "External API => Disconnect({}, {:?})", peer_id, set_id);
 		self.disconnect_peer_inner(peer_id, set_id);
 	}
 
@@ -508,7 +508,7 @@ impl Notifications {
 
 			// DisabledPendingEnable => Disabled.
 			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
-				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+				trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 				self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id);
 				*entry.into_mut() =
 					PeerState::Disabled { connections, backoff_until: Some(timer_deadline) }
@@ -518,11 +518,11 @@ impl Notifications {
 			// All open or opening connections are sent a `Close` message.
 			// If relevant, the external API is instantly notified.
 			PeerState::Enabled { mut connections } => {
-				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+				trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 				self.protocol_controller_handles[usize::from(set_id)].dropped(*peer_id);
 
 				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
-					trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", peer_id, set_id);
 					let event =
 						NotificationsOut::CustomProtocolClosed { peer_id: *peer_id, set_id };
 					self.events.push_back(ToSwarm::GenerateEvent(event));
@@ -531,7 +531,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -543,7 +543,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -573,7 +573,7 @@ impl Notifications {
 					inc
 				} else {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"State mismatch in libp2p: no entry in incoming for incoming peer"
 					);
 					return
@@ -585,7 +585,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: *peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -601,7 +601,7 @@ impl Notifications {
 			},
 
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id)
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", peer_id)
 			},
 		}
 	}
@@ -614,12 +614,12 @@ impl Notifications {
 			Entry::Vacant(entry) => {
 				// If there's no entry in `self.peers`, start dialing.
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Starting to connect",
 					entry.key().0,
 					set_id,
 				);
-				trace!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0);
+				trace!(target: LOG_TARGET, "Libp2p <= Dial {}", entry.key().0);
 				self.events.push_back(ToSwarm::Dial { opts: entry.key().0.into() });
 				entry.insert(PeerState::Requested);
 				return
@@ -633,7 +633,7 @@ impl Notifications {
 			PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => {
 				let peer_id = occ_entry.key().0;
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Will start to connect at until {:?}",
 					peer_id,
 					set_id,
@@ -646,12 +646,12 @@ impl Notifications {
 			// Backoff (expired) => Requested
 			PeerState::Backoff { .. } => {
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Starting to connect",
 					occ_entry.key().0,
 					set_id,
 				);
-				trace!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key());
+				trace!(target: LOG_TARGET, "Libp2p <= Dial {:?}", occ_entry.key());
 				self.events.push_back(ToSwarm::Dial { opts: occ_entry.key().0.into() });
 				*occ_entry.into_mut() = PeerState::Requested;
 			},
@@ -662,7 +662,7 @@ impl Notifications {
 			{
 				let peer_id = occ_entry.key().0;
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): But peer is backed-off until {:?}",
 					peer_id,
 					set_id,
@@ -697,9 +697,9 @@ impl Notifications {
 				if let Some((connec_id, connec_state)) =
 					connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed))
 				{
-					trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.",
+					trace!(target: LOG_TARGET, "PSM => Connect({}, {:?}): Enabling connections.",
 						occ_entry.key().0, set_id);
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id);
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id,
 						handler: NotifyHandler::One(*connec_id),
@@ -714,7 +714,7 @@ impl Notifications {
 						matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing)
 					}));
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Connect({}, {:?}): No connection in proper state. Delaying.",
 						occ_entry.key().0, set_id
 					);
@@ -750,7 +750,7 @@ impl Notifications {
 			// Incoming => Incoming
 			st @ PeerState::Incoming { .. } => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Ignoring obsolete connect, we are awaiting accept/reject.",
 					occ_entry.key().0, set_id
 				);
@@ -759,26 +759,26 @@ impl Notifications {
 
 			// Other states are kept as-is.
 			st @ PeerState::Enabled { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Already connected.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 			st @ PeerState::DisabledPendingEnable { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Already pending enabling.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 			st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => {
-				debug!(target: "sub-libp2p",
+				debug!(target: LOG_TARGET,
 					"PSM => Connect({}, {:?}): Duplicate request.",
 					occ_entry.key().0, set_id);
 				*occ_entry.into_mut() = st;
 			},
 
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key());
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", occ_entry.key());
 				debug_assert!(false);
 			},
 		}
@@ -789,7 +789,7 @@ impl Notifications {
 		let mut entry = match self.peers.entry((peer_id, set_id)) {
 			Entry::Occupied(entry) => entry,
 			Entry::Vacant(entry) => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Already disabled.",
 					entry.key().0, set_id);
 				return
 			},
@@ -797,7 +797,7 @@ impl Notifications {
 
 		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 			st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Already disabled.",
 					entry.key().0, set_id);
 				*entry.into_mut() = st;
 			},
@@ -805,7 +805,7 @@ impl Notifications {
 			// DisabledPendingEnable => Disabled
 			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
 				debug_assert!(!connections.is_empty());
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"PSM => Drop({}, {:?}): Interrupting pending enabling.",
 					entry.key().0, set_id);
 				*entry.into_mut() =
@@ -814,7 +814,7 @@ impl Notifications {
 
 			// Enabled => Disabled
 			PeerState::Enabled { mut connections } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Disabling connections.",
 					entry.key().0, set_id);
 
 				debug_assert!(connections.iter().any(|(_, s)| matches!(
@@ -823,7 +823,7 @@ impl Notifications {
 				)));
 
 				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
-					trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id);
+					trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", entry.key().0, set_id);
 					let event =
 						NotificationsOut::CustomProtocolClosed { peer_id: entry.key().0, set_id };
 					self.events.push_back(ToSwarm::GenerateEvent(event));
@@ -832,7 +832,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						entry.key(), *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: entry.key().0,
@@ -845,7 +845,7 @@ impl Notifications {
 				for (connec_id, connec_state) in
 					connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						entry.key(), *connec_id, set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: entry.key().0,
@@ -863,14 +863,14 @@ impl Notifications {
 				// We don't cancel dialing. Libp2p doesn't expose that on purpose, as other
 				// sub-systems (such as the discovery mechanism) may require dialing this peer as
 				// well at the same time.
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Not yet connected.",
 					entry.key().0, set_id);
 				entry.remove();
 			},
 
 			// PendingRequest => Backoff
 			PeerState::PendingRequest { timer, timer_deadline } => {
-				trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected",
+				trace!(target: LOG_TARGET, "PSM => Drop({}, {:?}): Not yet connected",
 					entry.key().0, set_id);
 				*entry.into_mut() = PeerState::Backoff { timer, timer_deadline }
 			},
@@ -880,7 +880,7 @@ impl Notifications {
 			// the protocol, reject the substream
 			PeerState::Incoming { backoff_until, connections, incoming_index, .. } => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.",
 					entry.key().0, set_id,
 				);
@@ -892,7 +892,7 @@ impl Notifications {
 				};
 			},
 			PeerState::Poisoned => {
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key());
+				error!(target: LOG_TARGET, "State of {:?} is poisoned", entry.key());
 				debug_assert!(false);
 			},
 		}
@@ -944,19 +944,19 @@ impl Notifications {
 			if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) {
 				(pos, self.incoming.get(pos))
 			} else {
-				error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index);
+				error!(target: LOG_TARGET, "PSM => Accept({:?}): Invalid index", index);
 				return
 			};
 
 		let Some(incoming) = incoming else {
-			error!(target: "sub-libp2p", "Incoming connection ({:?}) doesn't exist", index);
+			error!(target: LOG_TARGET, "Incoming connection ({:?}) doesn't exist", index);
 			debug_assert!(false);
 			return;
 		};
 
 		if !incoming.alive {
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"PSM => Accept({:?}, {}, {:?}): Obsolete incoming",
 				index,
 				incoming.peer_id,
@@ -967,7 +967,7 @@ impl Notifications {
 				Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => {
 				},
 				_ => {
-					trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})",
+					trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})",
 						incoming.peer_id, incoming.set_id);
 					self.protocol_controller_handles[usize::from(incoming.set_id)]
 						.dropped(incoming.peer_id);
@@ -982,7 +982,7 @@ impl Notifications {
 			Some(s) => s,
 			None => {
 				log::debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Connection to {:?} closed, ({:?} {:?}), ignoring accept",
 					incoming.peer_id,
 					incoming.set_id,
@@ -1003,7 +1003,7 @@ impl Notifications {
 			} => {
 				if index < incoming_index {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1012,7 +1012,7 @@ impl Notifications {
 					return
 				} else if index > incoming_index {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1026,7 +1026,7 @@ impl Notifications {
 				// for the it to be closed so reject the substream now
 				if peerset_rejected {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Protocol accepted ({:?} {:?} {:?}) but Peerset had request disconnection, rejecting",
 						index,
 						incoming.peer_id,
@@ -1043,7 +1043,7 @@ impl Notifications {
 				}
 
 				trace!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"PSM => Accept({:?}, {}, {:?}): Enabling connections.",
 					index,
 					incoming.peer_id,
@@ -1057,7 +1057,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 						incoming.peer_id, *connec_id, incoming.set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: incoming.peer_id,
@@ -1077,7 +1077,7 @@ impl Notifications {
 			// Any state other than `Incoming` is invalid.
 			peer => {
 				error!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
 					peer
 				);
@@ -1106,13 +1106,13 @@ impl Notifications {
 		{
 			self.incoming.remove(pos)
 		} else {
-			error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index);
+			error!(target: LOG_TARGET, "PSM => Reject({:?}): Invalid index", index);
 			return None
 		};
 
 		if !incoming.alive {
 			trace!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"PSM => Reject({:?}, {}, {:?}): Obsolete incoming, ignoring",
 				index,
 				incoming.peer_id,
@@ -1126,7 +1126,7 @@ impl Notifications {
 			Some(s) => s,
 			None => {
 				log::debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Connection to {:?} closed, ({:?} {:?}), ignoring accept",
 					incoming.peer_id,
 					incoming.set_id,
@@ -1141,14 +1141,14 @@ impl Notifications {
 			PeerState::Incoming { mut connections, backoff_until, incoming_index, .. } => {
 				if index < incoming_index {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
 					return None
 				} else if index > incoming_index {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"PSM => Reject({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
@@ -1156,7 +1156,7 @@ impl Notifications {
 					return None
 				}
 
-				trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
+				trace!(target: LOG_TARGET, "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
 					index, incoming.peer_id, incoming.set_id);
 
 				debug_assert!(connections
@@ -1166,7 +1166,7 @@ impl Notifications {
 					.iter_mut()
 					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
 				{
-					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+					trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Close({:?})",
 						incoming.peer_id, connec_id, incoming.set_id);
 					self.events.push_back(ToSwarm::NotifyHandler {
 						peer_id: incoming.peer_id,
@@ -1252,11 +1252,11 @@ impl NetworkBehaviour for Notifications {
 						// Requested | PendingRequest => Enabled
 						st @ &mut PeerState::Requested |
 						st @ &mut PeerState::PendingRequest { .. } => {
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.",
 								peer_id, set_id, endpoint
 							);
-							trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, connection_id, set_id);
+							trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})", peer_id, connection_id, set_id);
 							self.events.push_back(ToSwarm::NotifyHandler {
 								peer_id,
 								handler: NotifyHandler::One(connection_id),
@@ -1277,7 +1277,7 @@ impl NetworkBehaviour for Notifications {
 								} else {
 									None
 								};
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}, {:?}): Not requested by PSM, disabling.",
 								peer_id, set_id, endpoint, connection_id);
 
@@ -1292,7 +1292,7 @@ impl NetworkBehaviour for Notifications {
 						PeerState::Disabled { connections, .. } |
 						PeerState::DisabledPendingEnable { connections, .. } |
 						PeerState::Enabled { connections, .. } => {
-							trace!(target: "sub-libp2p",
+							trace!(target: LOG_TARGET,
 								"Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.",
 								peer_id, set_id, endpoint, connection_id);
 							connections.push((connection_id, ConnectionState::Closed));
@@ -1307,7 +1307,7 @@ impl NetworkBehaviour for Notifications {
 					{
 						entry
 					} else {
-						error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler");
+						error!(target: LOG_TARGET, "inject_connection_closed: State mismatch in the custom protos handler");
 						debug_assert!(false);
 						return
 					};
@@ -1315,7 +1315,7 @@ impl NetworkBehaviour for Notifications {
 					match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 						// Disabled => Disabled | Backoff | Ø
 						PeerState::Disabled { mut connections, backoff_until } => {
-							trace!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.",
+							trace!(target: LOG_TARGET, "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.",
 								peer_id, set_id, connection_id);
 
 							if let Some(pos) =
@@ -1324,7 +1324,7 @@ impl NetworkBehaviour for Notifications {
 								connections.remove(pos);
 							} else {
 								debug_assert!(false);
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 							}
 
@@ -1366,7 +1366,7 @@ impl NetworkBehaviour for Notifications {
 							timer,
 						} => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): Disabled but pending enable.",
 								peer_id, set_id, connection_id
 							);
@@ -1376,13 +1376,13 @@ impl NetworkBehaviour for Notifications {
 							{
 								connections.remove(pos);
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
 
 							if connections.is_empty() {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								*entry.get_mut() = PeerState::Backoff { timer, timer_deadline };
@@ -1403,7 +1403,7 @@ impl NetworkBehaviour for Notifications {
 							peerset_rejected,
 						} => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.",
 								peer_id, set_id, connection_id
 							);
@@ -1417,7 +1417,7 @@ impl NetworkBehaviour for Notifications {
 							{
 								connections.remove(pos);
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
@@ -1439,7 +1439,7 @@ impl NetworkBehaviour for Notifications {
 								{
 									state.alive = false;
 								} else {
-									error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \
+									error!(target: LOG_TARGET, "State mismatch in libp2p: no entry in \
 										incoming corresponding to an incoming state in peers");
 									debug_assert!(false);
 								}
@@ -1489,7 +1489,7 @@ impl NetworkBehaviour for Notifications {
 						// Peers are always backed-off when disconnecting while Enabled.
 						PeerState::Enabled { mut connections } => {
 							trace!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Libp2p => Disconnected({}, {:?}, {:?}): Enabled.",
 								peer_id, set_id, connection_id
 							);
@@ -1513,7 +1513,7 @@ impl NetworkBehaviour for Notifications {
 										}) {
 										if pos <= replacement_pos {
 											trace!(
-												target: "sub-libp2p",
+												target: LOG_TARGET,
 												"External API <= Sink replaced({}, {:?})",
 												peer_id, set_id
 											);
@@ -1526,7 +1526,7 @@ impl NetworkBehaviour for Notifications {
 										}
 									} else {
 										trace!(
-											target: "sub-libp2p", "External API <= Closed({}, {:?})",
+											target: LOG_TARGET, "External API <= Closed({}, {:?})",
 											peer_id, set_id
 										);
 										let event = NotificationsOut::CustomProtocolClosed {
@@ -1537,13 +1537,13 @@ impl NetworkBehaviour for Notifications {
 									}
 								}
 							} else {
-								error!(target: "sub-libp2p",
+								error!(target: LOG_TARGET,
 									"inject_connection_closed: State mismatch in the custom protos handler");
 								debug_assert!(false);
 							}
 
 							if connections.is_empty() {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
@@ -1566,7 +1566,7 @@ impl NetworkBehaviour for Notifications {
 							} else if !connections.iter().any(|(_, s)| {
 								matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
 							}) {
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 
@@ -1581,13 +1581,13 @@ impl NetworkBehaviour for Notifications {
 						PeerState::PendingRequest { .. } |
 						PeerState::Backoff { .. } => {
 							// This is a serious bug either in this state machine or in libp2p.
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"`inject_connection_closed` called for unknown peer {}",
 								peer_id);
 							debug_assert!(false);
 						},
 						PeerState::Poisoned => {
-							error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id);
+							error!(target: LOG_TARGET, "State of peer {} is poisoned", peer_id);
 							debug_assert!(false);
 						},
 					}
@@ -1596,12 +1596,12 @@ impl NetworkBehaviour for Notifications {
 			FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => {
 				if let DialError::Transport(errors) = error {
 					for (addr, error) in errors.iter() {
-						trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
+						trace!(target: LOG_TARGET, "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
 					}
 				}
 
 				if let Some(peer_id) = peer_id {
-					trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id);
+					trace!(target: LOG_TARGET, "Libp2p => Dial failure for {:?}", peer_id);
 
 					for set_id in (0..self.notif_protocols.len()).map(SetId::from) {
 						if let Entry::Occupied(mut entry) = self.peers.entry((peer_id, set_id)) {
@@ -1615,7 +1615,7 @@ impl NetworkBehaviour for Notifications {
 								// requested.
 								st @ PeerState::Requested |
 								st @ PeerState::PendingRequest { .. } => {
-									trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+									trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 									self.protocol_controller_handles[usize::from(set_id)]
 										.dropped(peer_id);
 
@@ -1654,7 +1654,7 @@ impl NetworkBehaviour for Notifications {
 								},
 
 								PeerState::Poisoned => {
-									error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id);
+									error!(target: LOG_TARGET, "State of {:?} is poisoned", peer_id);
 									debug_assert!(false);
 								},
 							}
@@ -1673,7 +1673,7 @@ impl NetworkBehaviour for Notifications {
 			FromSwarm::AddressChange(_) => {},
 			FromSwarm::NewListenAddr(_) => {},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}");
+				warn!(target: LOG_TARGET, "New unknown `FromSwarm` libp2p event: {event:?}");
 			},
 		}
 	}
@@ -1688,7 +1688,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})",
 					peer_id, connection_id, set_id);
 
@@ -1697,7 +1697,7 @@ impl NetworkBehaviour for Notifications {
 					entry
 				} else {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"OpenDesiredByRemote: State mismatch in the custom protos handler"
 					);
 					debug_assert!(false);
@@ -1733,7 +1733,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1757,7 +1757,7 @@ impl NetworkBehaviour for Notifications {
 							connections.iter_mut().find(|(c, _)| *c == connection_id)
 						{
 							if let ConnectionState::Closed = *connec_state {
-								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+								trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 									peer_id, connection_id, set_id);
 								self.events.push_back(ToSwarm::NotifyHandler {
 									peer_id,
@@ -1779,7 +1779,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1799,7 +1799,7 @@ impl NetworkBehaviour for Notifications {
 								let incoming_id = self.next_incoming_index;
 								self.next_incoming_index.0 += 1;
 
-								trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}, {:?}).",
+								trace!(target: LOG_TARGET, "PSM <= Incoming({}, {:?}, {:?}).",
 									peer_id, set_id, incoming_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.incoming_connection(peer_id, incoming_id);
@@ -1831,7 +1831,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1844,7 +1844,7 @@ impl NetworkBehaviour for Notifications {
 							connections.iter_mut().find(|(c, _)| *c == connection_id)
 						{
 							if let ConnectionState::Closed = *connec_state {
-								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
+								trace!(target: LOG_TARGET, "Handler({:?}, {:?}) <= Open({:?})",
 									peer_id, connection_id, set_id);
 								self.events.push_back(ToSwarm::NotifyHandler {
 									peer_id,
@@ -1871,7 +1871,7 @@ impl NetworkBehaviour for Notifications {
 							}
 						} else {
 							error!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
 							debug_assert!(false);
@@ -1879,7 +1879,7 @@ impl NetworkBehaviour for Notifications {
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -1890,7 +1890,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::CloseDesired { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => CloseDesired({:?})",
 					peer_id, connection_id, set_id);
 
@@ -1898,7 +1898,7 @@ impl NetworkBehaviour for Notifications {
 				{
 					entry
 				} else {
-					error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler");
+					error!(target: LOG_TARGET, "CloseDesired: State mismatch in the custom protos handler");
 					debug_assert!(false);
 					return
 				};
@@ -1916,7 +1916,7 @@ impl NetworkBehaviour for Notifications {
 						{
 							pos
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"CloseDesired: State mismatch in the custom protos handler");
 							debug_assert!(false);
 							return
@@ -1930,7 +1930,7 @@ impl NetworkBehaviour for Notifications {
 						debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_)));
 						connections[pos].1 = ConnectionState::Closing;
 
-						trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", peer_id, connection_id, set_id);
+						trace!(target: LOG_TARGET, "Handler({}, {:?}) <= Close({:?})", peer_id, connection_id, set_id);
 						self.events.push_back(ToSwarm::NotifyHandler {
 							peer_id,
 							handler: NotifyHandler::One(connection_id),
@@ -1943,7 +1943,7 @@ impl NetworkBehaviour for Notifications {
 								_ => None,
 							}) {
 							if pos <= replacement_pos {
-								trace!(target: "sub-libp2p", "External API <= Sink replaced({:?}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "External API <= Sink replaced({:?}, {:?})", peer_id, set_id);
 								let event = NotificationsOut::CustomProtocolReplaced {
 									peer_id,
 									set_id,
@@ -1959,7 +1959,7 @@ impl NetworkBehaviour for Notifications {
 								.iter()
 								.any(|(_, s)| matches!(s, ConnectionState::Opening))
 							{
-								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "PSM <= Dropped({}, {:?})", peer_id, set_id);
 								self.protocol_controller_handles[usize::from(set_id)]
 									.dropped(peer_id);
 								*entry.into_mut() =
@@ -1968,7 +1968,7 @@ impl NetworkBehaviour for Notifications {
 								*entry.into_mut() = PeerState::Enabled { connections };
 							}
 
-							trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id);
+							trace!(target: LOG_TARGET, "External API <= Closed({}, {:?})", peer_id, set_id);
 							let event = NotificationsOut::CustomProtocolClosed { peer_id, set_id };
 							self.events.push_back(ToSwarm::GenerateEvent(event));
 						}
@@ -1981,7 +1981,7 @@ impl NetworkBehaviour for Notifications {
 						*entry.into_mut() = state;
 					},
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 					},
@@ -1991,7 +1991,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::CloseResult { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => CloseResult({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2006,14 +2006,14 @@ impl NetworkBehaviour for Notifications {
 						}) {
 							*connec_state = ConnectionState::Closed;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"CloseResult: State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "CloseResult: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -2030,7 +2030,7 @@ impl NetworkBehaviour for Notifications {
 				..
 			} => {
 				let set_id = SetId::from(protocol_index);
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({}, {:?}) => OpenResultOk({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2047,7 +2047,7 @@ impl NetworkBehaviour for Notifications {
 							*c == connection_id && matches!(s, ConnectionState::Opening)
 						}) {
 							if !any_open {
-								trace!(target: "sub-libp2p", "External API <= Open({}, {:?})", peer_id, set_id);
+								trace!(target: LOG_TARGET, "External API <= Open({}, {:?})", peer_id, set_id);
 								let event = NotificationsOut::CustomProtocolOpen {
 									peer_id,
 									set_id,
@@ -2070,7 +2070,7 @@ impl NetworkBehaviour for Notifications {
 							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultOk State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
@@ -2084,14 +2084,14 @@ impl NetworkBehaviour for Notifications {
 						}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultOk State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
 					},
 
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							   "OpenResultOk: Unexpected state in the custom protos handler: {:?}",
 							   state);
 						debug_assert!(false);
@@ -2101,7 +2101,7 @@ impl NetworkBehaviour for Notifications {
 
 			NotifsHandlerOut::OpenResultErr { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
-				trace!(target: "sub-libp2p",
+				trace!(target: LOG_TARGET,
 					"Handler({:?}, {:?}) => OpenResultErr({:?})",
 					peer_id, connection_id, set_id);
 
@@ -2109,7 +2109,7 @@ impl NetworkBehaviour for Notifications {
 				{
 					entry
 				} else {
-					error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler");
+					error!(target: LOG_TARGET, "OpenResultErr: State mismatch in the custom protos handler");
 					debug_assert!(false);
 					return
 				};
@@ -2132,7 +2132,7 @@ impl NetworkBehaviour for Notifications {
 							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(target: "sub-libp2p",
+							error!(target: LOG_TARGET,
 								"OpenResultErr: State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
@@ -2140,7 +2140,7 @@ impl NetworkBehaviour for Notifications {
 						if !connections.iter().any(|(_, s)| {
 							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
 						}) {
-							trace!(target: "sub-libp2p", "PSM <= Dropped({:?}, {:?})", peer_id, set_id);
+							trace!(target: LOG_TARGET, "PSM <= Dropped({:?}, {:?})", peer_id, set_id);
 							self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id);
 
 							let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
@@ -2166,7 +2166,7 @@ impl NetworkBehaviour for Notifications {
 									}) {
 									*connec_state = ConnectionState::Closing;
 								} else {
-									error!(target: "sub-libp2p",
+									error!(target: LOG_TARGET,
 										"OpenResultErr: State mismatch in the custom protos handler");
 									debug_assert!(false);
 								}
@@ -2180,7 +2180,7 @@ impl NetworkBehaviour for Notifications {
 						*entry.into_mut() = state;
 					},
 					state => {
-						error!(target: "sub-libp2p",
+						error!(target: LOG_TARGET,
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 						debug_assert!(false);
@@ -2192,7 +2192,7 @@ impl NetworkBehaviour for Notifications {
 				let set_id = SetId::from(protocol_index);
 				if self.is_open(&peer_id, set_id) {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Handler({:?}) => Notification({}, {:?}, {} bytes)",
 						connection_id,
 						peer_id,
@@ -2200,7 +2200,7 @@ impl NetworkBehaviour for Notifications {
 						message.len()
 					);
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"External API <= Message({}, {:?})",
 						peer_id,
 						set_id,
@@ -2213,7 +2213,7 @@ impl NetworkBehaviour for Notifications {
 					self.events.push_back(ToSwarm::GenerateEvent(event));
 				} else {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Handler({:?}) => Post-close notification({}, {:?}, {} bytes)",
 						connection_id,
 						peer_id,
@@ -2225,7 +2225,7 @@ impl NetworkBehaviour for Notifications {
 			NotifsHandlerOut::Close { protocol_index } => {
 				let set_id = SetId::from(protocol_index);
 
-				trace!(target: "sub-libp2p", "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id);
+				trace!(target: LOG_TARGET, "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id);
 				self.events.push_back(ToSwarm::CloseConnection {
 					peer_id,
 					connection: CloseConnection::One(connection_id),
@@ -2256,7 +2256,7 @@ impl NetworkBehaviour for Notifications {
 				},
 				Poll::Ready(None) => {
 					error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Protocol controllers receiver stream has returned `None`. Ignore this error if the node is shutting down.",
 					);
 					break
@@ -2314,12 +2314,12 @@ impl NetworkBehaviour for Notifications {
 
 			match peer_state {
 				PeerState::Backoff { timer, .. } if *timer == delay_id => {
-					trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state ({:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "Libp2p <= Clean up ban of {:?} from the state ({:?})", peer_id, set_id);
 					self.peers.remove(&(peer_id, set_id));
 				},
 
 				PeerState::PendingRequest { timer, .. } if *timer == delay_id => {
-					trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired ({:?})", peer_id, set_id);
+					trace!(target: LOG_TARGET, "Libp2p <= Dial {:?} now that ban has expired ({:?})", peer_id, set_id);
 					self.events.push_back(ToSwarm::Dial { opts: peer_id.into() });
 					*peer_state = PeerState::Requested;
 				},
@@ -2331,7 +2331,7 @@ impl NetworkBehaviour for Notifications {
 					if let Some((connec_id, connec_state)) =
 						connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed))
 					{
-						trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)",
+						trace!(target: LOG_TARGET, "Handler({}, {:?}) <= Open({:?}) (ban expired)",
 							peer_id, *connec_id, set_id);
 						self.events.push_back(ToSwarm::NotifyHandler {
 							peer_id,
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index 332de9f19c410f8ac2c9914dfd7d25e6215b7c01..416a35ad88c9ac1cf24675c37904998bb8d943f3 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -79,7 +79,7 @@ use libp2p::{
 	},
 	PeerId,
 };
-use log::{error, warn};
+
 use parking_lot::{Mutex, RwLock};
 use std::{
 	collections::VecDeque,
@@ -90,6 +90,9 @@ use std::{
 	time::Duration,
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::handler";
+
 /// Number of pending notifications in asynchronous contexts.
 /// See [`NotificationsSink::reserve_notification`] for context.
 pub(crate) const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
@@ -561,7 +564,7 @@ impl ConnectionHandler for NotifsHandler {
 						*pending_opening = false;
 					},
 					State::Open { .. } => {
-						error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler");
+						log::error!(target: LOG_TARGET, "☎️ State mismatch in notifications handler");
 						debug_assert!(false);
 					},
 					State::Opening { ref mut in_substream, inbound } => {
@@ -622,7 +625,7 @@ impl ConnectionHandler for NotifsHandler {
 			},
 			ConnectionEvent::ListenUpgradeError(_listen_upgrade_error) => {},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown `ConnectionEvent` libp2p event: {event:?}");
+				log::warn!(target: LOG_TARGET, "New unknown `ConnectionEvent` libp2p event: {event:?}");
 			},
 		}
 	}
@@ -686,7 +689,7 @@ impl ConnectionHandler for NotifsHandler {
 					State::Opening { .. } | State::Open { .. } => {
 						// As documented, it is forbidden to send an `Open` while there is already
 						// one in the fly.
-						error!(target: "sub-libp2p", "opening already-opened handler");
+						log::error!(target: LOG_TARGET, "opening already-opened handler");
 						debug_assert!(false);
 					},
 				}
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
index a7eb31fc5795d6ee25d5769292428f40f3237d65..fe88a8793766e5b861bedcd5b3d3ffb87a8b4c96 100644
--- a/substrate/client/network/src/protocol/notifications/service/mod.rs
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -49,7 +49,7 @@ pub(crate) mod metrics;
 mod tests;
 
 /// Logging target for the file.
-const LOG_TARGET: &str = "sub-libp2p";
+const LOG_TARGET: &str = "sub-libp2p::notification::service";
 
 /// Default command queue size.
 const COMMAND_QUEUE_SIZE: usize = 64;
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 9e8a03fc07c9c3490394428c355364f7a0065345..b4d0de171a183935d38c928dc5b935b95d42e2e6 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -50,6 +50,9 @@ use std::{
 	vec,
 };
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification::upgrade";
+
 /// Maximum allowed size of the two handshake messages, in bytes.
 const MAX_HANDSHAKE_SIZE: usize = 1024;
 
@@ -210,7 +213,7 @@ where
 	/// Sends the handshake in order to inform the remote that we accept the substream.
 	pub fn send_handshake(&mut self, message: impl Into<Vec<u8>>) {
 		if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) {
-			error!(target: "sub-libp2p", "Tried to send handshake twice");
+			error!(target: LOG_TARGET, "Tried to send handshake twice");
 			return
 		}
 
@@ -349,7 +352,7 @@ impl NotificationsOut {
 	) -> Self {
 		let initial_message = initial_message.into();
 		if initial_message.len() > MAX_HANDSHAKE_SIZE {
-			error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit");
+			error!(target: LOG_TARGET, "Outbound networking handshake is above allowed protocol limit");
 		}
 
 		let mut protocol_names = fallback_names;
@@ -464,7 +467,7 @@ where
 			Poll::Pending => {},
 			Poll::Ready(Some(_)) => {
 				error!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Unexpected incoming data in `NotificationsOutSubstream`",
 				);
 			},
diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs
index e21773632ed7756185873429187fb5a595326040..ac87224549f97f18fa740020bea578b2e979c2a2 100644
--- a/substrate/client/network/src/request_responses.rs
+++ b/substrate/client/network/src/request_responses.rs
@@ -64,6 +64,9 @@ use std::{
 
 pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId};
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::request-response";
+
 /// Periodically check if requests are taking too long.
 const PERIODIC_REQUEST_CHECK: Duration = Duration::from_secs(2);
 
@@ -461,7 +464,7 @@ impl RequestResponsesBehaviour {
 		pending_response: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
 		connect: IfDisconnected,
 	) {
-		log::trace!(target: "sub-libp2p", "send request to {target} ({protocol_name:?}), {} bytes", request.len());
+		log::trace!(target: LOG_TARGET, "send request to {target} ({protocol_name:?}), {} bytes", request.len());
 
 		if let Some(ProtocolDetails { behaviour, .. }) =
 			self.protocols.get_mut(protocol_name.deref())
@@ -478,7 +481,7 @@ impl RequestResponsesBehaviour {
 			)
 		} else if pending_response.send(Err(RequestFailure::UnknownProtocol)).is_err() {
 			log::debug!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Unknown protocol {:?}. At the same time local \
 				 node is no longer interested in the result.",
 				protocol_name,
@@ -509,7 +512,7 @@ impl RequestResponsesBehaviour {
 			debug_assert!(prev_req_id.is_none(), "Expect request id to be unique.");
 		} else if pending_response.send(Err(RequestFailure::NotConnected)).is_err() {
 			log::debug!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"Not connected to peer {:?}. At the same time local \
 				 node is no longer interested in the result.",
 				target,
@@ -615,7 +618,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 			return behaviour.on_connection_handler_event(peer_id, connection_id, event.1)
 		} else {
 			log::warn!(
-				target: "sub-libp2p",
+				target: LOG_TARGET,
 				"on_connection_handler_event: no request-response instance registered for protocol {:?}",
 				p_name
 			);
@@ -631,14 +634,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 						self.protocols.get(&id.protocol)
 					else {
 						log::warn!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Request {id:?} has no protocol registered.",
 						);
 
 						if let Some(response_tx) = req.response_tx.take() {
 							if response_tx.send(Err(RequestFailure::UnknownProtocol)).is_err() {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Request {id:?} has no protocol registered. At the same time local node is no longer interested in the result.",
 								);
 							}
@@ -649,14 +652,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 					let elapsed = req.started_at.elapsed();
 					if elapsed > *request_timeout {
 						log::debug!(
-							target: "sub-libp2p",
+							target: LOG_TARGET,
 							"Request {id:?} force detected as timeout.",
 						);
 
 						if let Some(response_tx) = req.response_tx.take() {
 							if response_tx.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).is_err() {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Request {id:?} force detected as timeout. At the same time local node is no longer interested in the result.",
 								);
 							}
@@ -688,13 +691,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 					if let Some(ProtocolDetails { behaviour, .. }) =
 						self.protocols.get_mut(&*protocol_name)
 					{
-						log::trace!(target: "sub-libp2p", "send response to {peer} ({protocol_name:?}), {} bytes", payload.len());
+						log::trace!(target: LOG_TARGET, "send response to {peer} ({protocol_name:?}), {} bytes", payload.len());
 
 						if behaviour.send_response(inner_channel, Ok(payload)).is_err() {
 							// Note: Failure is handled further below when receiving
 							// `InboundFailure` event from request-response [`Behaviour`].
 							log::debug!(
-								target: "sub-libp2p",
+								target: LOG_TARGET,
 								"Failed to send response for {:?} on protocol {:?} due to a \
 								 timeout or due to the connection to the peer being closed. \
 								 Dropping response",
@@ -730,7 +733,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 						ToSwarm::Dial { opts } => {
 							if opts.get_peer_id().is_none() {
 								log::error!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"The request-response isn't supposed to start dialing addresses"
 								);
 							}
@@ -762,7 +765,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 
 							if reputation < BANNED_THRESHOLD {
 								log::debug!(
-									target: "sub-libp2p",
+									target: LOG_TARGET,
 									"Cannot handle requests from a node with a low reputation {}: {}",
 									peer,
 									reputation,
@@ -828,7 +831,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 									..
 								}) => {
 									log::trace!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"received response from {peer} ({protocol:?}), {} bytes",
 										response.as_ref().map_or(0usize, |response| response.len()),
 									);
@@ -844,7 +847,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								},
 								_ => {
 									log::debug!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"Received `RequestResponseEvent::Message` with unexpected request id {:?} from {:?}",
 										request_id,
 										peer,
@@ -887,7 +890,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 											fallback_request
 										{
 											log::trace!(
-												target: "sub-libp2p",
+												target: LOG_TARGET,
 												"Request with id {:?} failed. Trying the fallback protocol. {}",
 												request_id,
 												fallback_protocol.deref()
@@ -907,7 +910,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 										.is_err()
 									{
 										log::debug!(
-											target: "sub-libp2p",
+											target: LOG_TARGET,
 											"Request with id {:?} failed. At the same time local \
 											 node is no longer interested in the result.",
 											request_id,
@@ -917,7 +920,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								},
 								_ => {
 									log::debug!(
-										target: "sub-libp2p",
+										target: LOG_TARGET,
 										"Received `RequestResponseEvent::OutboundFailure` with unexpected request id {:?} error {:?} from {:?}",
 										request_id,
 										error,
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index b4463ad480891eafe40199d9ff7c5ef23a111681..3f6ff7c5f6dfa75b73d4a55e0e61c3f452cacb63 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -110,6 +110,9 @@ pub(crate) mod out_events;
 pub mod signature;
 pub mod traits;
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
 struct Libp2pBandwidthSink {
 	#[allow(deprecated)]
 	sink: Arc<transport::BandwidthSinks>,
@@ -287,7 +290,7 @@ where
 			.filter(|reserved_node| {
 				if reserved_node.peer_id == local_peer_id.into() {
 					warn!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Local peer ID used in reserved node, ignoring: {}",
 						reserved_node,
 					);
@@ -329,11 +332,11 @@ where
 		}
 
 		info!(
-			target: "sub-libp2p",
+			target: LOG_TARGET,
 			"🏷  Local node identity is: {}",
 			local_peer_id.to_base58(),
 		);
-		info!(target: "sub-libp2p", "Running libp2p network backend");
+		info!(target: LOG_TARGET, "Running libp2p network backend");
 
 		let (transport, bandwidth) = {
 			let config_mem = match network_config.transport {
@@ -569,7 +572,7 @@ where
 		// Listen on multiaddresses.
 		for addr in &network_config.listen_addresses {
 			if let Err(err) = Swarm::<Behaviour<B>>::listen_on(&mut swarm, addr.clone().into()) {
-				warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err)
+				warn!(target: LOG_TARGET, "Can't listen on {} because: {:?}", addr, err)
 			}
 		}
 
@@ -681,7 +684,7 @@ where
 						) {
 						addrs.into_iter().collect()
 					} else {
-						error!(target: "sub-libp2p", "Was not able to get known addresses for {:?}", peer_id);
+						error!(target: LOG_TARGET, "Was not able to get known addresses for {:?}", peer_id);
 						return None
 					};
 
@@ -690,7 +693,7 @@ where
 					{
 						e.clone().into()
 					} else {
-						error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \
+						error!(target: LOG_TARGET, "Found state inconsistency between custom protocol \
 						and debug information about {:?}", peer_id);
 						return None
 					};
@@ -732,7 +735,7 @@ where
 						) {
 						addrs.into_iter().collect()
 					} else {
-						error!(target: "sub-libp2p", "Was not able to get known addresses for {:?}", peer_id);
+						error!(target: LOG_TARGET, "Was not able to get known addresses for {:?}", peer_id);
 						Default::default()
 					};
 
@@ -1145,7 +1148,7 @@ where
 		match Roles::decode_all(&mut &handshake[..]) {
 			Ok(role) => Some(role.into()),
 			Err(_) => {
-				log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}");
+				log::debug!(target: LOG_TARGET, "handshake doesn't contain peer role: {handshake:?}");
 				self.peer_store_handle.peer_role(&(peer_id.into()))
 			},
 		}
@@ -1278,11 +1281,11 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> {
 		}
 
 		trace!(
-			target: "sub-libp2p",
+			target: LOG_TARGET,
 			"External API => Notification({:?}, {}, {} bytes)",
 			self.peer_id, self.protocol_name, notification.len(),
 		);
-		trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id);
+		trace!(target: LOG_TARGET, "Handler({:?}) <= Async notification", self.peer_id);
 
 		self.ready
 			.take()
@@ -1570,7 +1573,7 @@ where
 			}) => {
 				if listen_addrs.len() > 30 {
 					debug!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}",
 						peer_id, protocol_version, agent_version
 					);
@@ -1684,9 +1687,9 @@ where
 				..
 			} => {
 				if let Some(errors) = concurrent_dial_errors {
-					debug!(target: "sub-libp2p", "Libp2p => Connected({:?}) with errors: {:?}", peer_id, errors);
+					debug!(target: LOG_TARGET, "Libp2p => Connected({:?}) with errors: {:?}", peer_id, errors);
 				} else {
-					debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id);
+					debug!(target: LOG_TARGET, "Libp2p => Connected({:?})", peer_id);
 				}
 
 				if let Some(metrics) = self.metrics.as_ref() {
@@ -1708,7 +1711,7 @@ where
 				endpoint,
 				num_established,
 			} => {
-				debug!(target: "sub-libp2p", "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})");
+				debug!(target: LOG_TARGET, "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})");
 				if let Some(metrics) = self.metrics.as_ref() {
 					let direction = match endpoint {
 						ConnectedPoint::Dialer { .. } => "out",
@@ -1728,14 +1731,14 @@ where
 				}
 			},
 			SwarmEvent::NewListenAddr { address, .. } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", address);
+				trace!(target: LOG_TARGET, "Libp2p => NewListenAddr({})", address);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_local_addresses.inc();
 				}
 				self.listen_addresses.lock().insert(address.clone());
 			},
 			SwarmEvent::ExpiredListenAddr { address, .. } => {
-				info!(target: "sub-libp2p", "📪 No longer listening on {}", address);
+				info!(target: LOG_TARGET, "📪 No longer listening on {}", address);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_local_addresses.dec();
 				}
@@ -1744,7 +1747,7 @@ where
 			SwarmEvent::OutgoingConnectionError { connection_id, peer_id, error } => {
 				if let Some(peer_id) = peer_id {
 					trace!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"Libp2p => Failed to reach {peer_id:?} via {connection_id:?}: {error}",
 					);
 
@@ -1800,10 +1803,10 @@ where
 				}
 			},
 			SwarmEvent::Dialing { connection_id, peer_id } => {
-				trace!(target: "sub-libp2p", "Libp2p => Dialing({peer_id:?}) via {connection_id:?}")
+				trace!(target: LOG_TARGET, "Libp2p => Dialing({peer_id:?}) via {connection_id:?}")
 			},
 			SwarmEvent::IncomingConnection { connection_id, local_addr, send_back_addr } => {
-				trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))");
+				trace!(target: LOG_TARGET, "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))");
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.incoming_connections_total.inc();
 				}
@@ -1815,7 +1818,7 @@ where
 				error,
 			} => {
 				debug!(
-					target: "sub-libp2p",
+					target: LOG_TARGET,
 					"Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}"
 				);
 				if let Some(metrics) = self.metrics.as_ref() {
@@ -1854,37 +1857,37 @@ where
 					addresses.into_iter().map(|a| a.to_string()).collect::<Vec<_>>().join(", ");
 				match reason {
 					Ok(()) => error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"📪 Libp2p listener ({}) closed gracefully",
 						addrs
 					),
 					Err(e) => error!(
-						target: "sub-libp2p",
+						target: LOG_TARGET,
 						"📪 Libp2p listener ({}) closed: {}",
 						addrs, e
 					),
 				}
 			},
 			SwarmEvent::ListenerError { error, .. } => {
-				debug!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error);
+				debug!(target: LOG_TARGET, "Libp2p => ListenerError: {}", error);
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics.listeners_errors_total.inc();
 				}
 			},
 			SwarmEvent::NewExternalAddrCandidate { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrCandidate: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => NewExternalAddrCandidate: {address:?}");
 			},
 			SwarmEvent::ExternalAddrConfirmed { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => ExternalAddrConfirmed: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => ExternalAddrConfirmed: {address:?}");
 			},
 			SwarmEvent::ExternalAddrExpired { address } => {
-				trace!(target: "sub-libp2p", "Libp2p => ExternalAddrExpired: {address:?}");
+				trace!(target: LOG_TARGET, "Libp2p => ExternalAddrExpired: {address:?}");
 			},
 			SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => {
-				trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}")
+				trace!(target: LOG_TARGET, "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}")
 			},
 			event => {
-				warn!(target: "sub-libp2p", "New unknown SwarmEvent libp2p event: {event:?}");
+				warn!(target: LOG_TARGET, "New unknown SwarmEvent libp2p event: {event:?}");
 			},
 		}
 	}
diff --git a/substrate/frame/asset-conversion/src/types.rs b/substrate/frame/asset-conversion/src/types.rs
index 27c0e8e68805ea7716ed7371aeed532be906fa47..1fc989e71675dac378a9b8d9dfd0ffbb759003e3 100644
--- a/substrate/frame/asset-conversion/src/types.rs
+++ b/substrate/frame/asset-conversion/src/types.rs
@@ -29,7 +29,7 @@ use sp_runtime::traits::TryConvert;
 /// 1. `asset(asset1, amount_in)` take from `user` and move to the pool(asset1, asset2);
 /// 2. `asset(asset2, amount_out2)` transfer from pool(asset1, asset2) to pool(asset2, asset3);
 /// 3. `asset(asset3, amount_out3)` move from pool(asset2, asset3) to `user`.
-pub(super) type BalancePath<T> = Vec<(<T as Config>::AssetKind, <T as Config>::Balance)>;
+pub type BalancePath<T> = Vec<(<T as Config>::AssetKind, <T as Config>::Balance)>;
 
 /// Credit of [Config::Assets].
 pub type CreditOf<T> = Credit<<T as frame_system::Config>::AccountId, <T as Config>::Assets>;
diff --git a/substrate/frame/assets-freezer/src/lib.rs b/substrate/frame/assets-freezer/src/lib.rs
index 61a695a6f5b8111b7f9eb06927655c84e2ef1713..e298658f16dbc5e0c004b773f4aed9e08acc36c5 100644
--- a/substrate/frame/assets-freezer/src/lib.rs
+++ b/substrate/frame/assets-freezer/src/lib.rs
@@ -105,7 +105,7 @@ pub mod pallet {
 
 	/// A map that stores freezes applied on an account for a given AssetId.
 	#[pallet::storage]
-	pub(super) type Freezes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Freezes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -120,7 +120,7 @@ pub mod pallet {
 
 	/// A map that stores the current total frozen balance for every account on a given AssetId.
 	#[pallet::storage]
-	pub(super) type FrozenBalances<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type FrozenBalances<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 9ea346c4cf3fde31c26fa5e940618ce473b693ee..6e946d610e07365f9225f339c80fd9d7fa5d9ecf 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -419,7 +419,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Details of an asset.
-	pub(super) type Asset<T: Config<I>, I: 'static = ()> = StorageMap<
+	pub type Asset<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -428,7 +428,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// The holdings of a specific account for a specific asset.
-	pub(super) type Account<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Account<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
@@ -441,7 +441,7 @@ pub mod pallet {
 	/// Approved balance transfers. First balance is the amount approved for transfer. Second
 	/// is the amount of `T::Currency` reserved for storing this.
 	/// First key is the asset ID, second key is the owner and third key is the delegate.
-	pub(super) type Approvals<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Approvals<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::AssetId>,
@@ -453,7 +453,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Metadata of an asset.
-	pub(super) type Metadata<T: Config<I>, I: 'static = ()> = StorageMap<
+	pub type Metadata<T: Config<I>, I: 'static = ()> = StorageMap<
 		_,
 		Blake2_128Concat,
 		T::AssetId,
diff --git a/substrate/frame/assets/src/types.rs b/substrate/frame/assets/src/types.rs
index 9a60a13f5a71c993d460666f860d8e64731267a3..baa530565bceae25ba7f8fbb39b9456a963e8cd2 100644
--- a/substrate/frame/assets/src/types.rs
+++ b/substrate/frame/assets/src/types.rs
@@ -24,21 +24,21 @@ use frame_support::{
 };
 use sp_runtime::{traits::Convert, FixedPointNumber, FixedU128};
 
-pub(super) type DepositBalanceOf<T, I = ()> =
+pub type DepositBalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
-pub(super) type AssetAccountOf<T, I> = AssetAccount<
+pub type AssetAccountOf<T, I> = AssetAccount<
 	<T as Config<I>>::Balance,
 	DepositBalanceOf<T, I>,
 	<T as Config<I>>::Extra,
 	<T as SystemConfig>::AccountId,
 >;
-pub(super) type ExistenceReasonOf<T, I> =
+pub type ExistenceReasonOf<T, I> =
 	ExistenceReason<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>;
 
 /// AssetStatus holds the current state of the asset. It could either be Live and available for use,
 /// or in a Destroying state.
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
-pub(super) enum AssetStatus {
+pub enum AssetStatus {
 	/// The asset is active and able to be used.
 	Live,
 	/// Whether the asset is frozen for non-admin transfers.
@@ -51,30 +51,30 @@ pub(super) enum AssetStatus {
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
 pub struct AssetDetails<Balance, AccountId, DepositBalance> {
 	/// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
-	pub(super) owner: AccountId,
+	pub owner: AccountId,
 	/// Can mint tokens.
-	pub(super) issuer: AccountId,
+	pub issuer: AccountId,
 	/// Can thaw tokens, force transfers and burn tokens from any account.
-	pub(super) admin: AccountId,
+	pub admin: AccountId,
 	/// Can freeze tokens.
-	pub(super) freezer: AccountId,
+	pub freezer: AccountId,
 	/// The total supply across all accounts.
-	pub(super) supply: Balance,
+	pub supply: Balance,
 	/// The balance deposited for this asset. This pays for the data stored here.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 	/// The ED for virtual accounts.
-	pub(super) min_balance: Balance,
+	pub min_balance: Balance,
 	/// If `true`, then any account with this asset is given a provider reference. Otherwise, it
 	/// requires a consumer reference.
-	pub(super) is_sufficient: bool,
+	pub is_sufficient: bool,
 	/// The total number of accounts.
-	pub(super) accounts: u32,
+	pub accounts: u32,
 	/// The total number of accounts for which we have placed a self-sufficient reference.
-	pub(super) sufficients: u32,
+	pub sufficients: u32,
 	/// The total number of approvals.
-	pub(super) approvals: u32,
+	pub approvals: u32,
 	/// The status of the asset
-	pub(super) status: AssetStatus,
+	pub status: AssetStatus,
 }
 
 /// Data concerning an approval.
@@ -82,9 +82,9 @@ pub struct AssetDetails<Balance, AccountId, DepositBalance> {
 pub struct Approval<Balance, DepositBalance> {
 	/// The amount of funds approved for the balance transfer from the owner to some delegated
 	/// target.
-	pub(super) amount: Balance,
+	pub amount: Balance,
 	/// The amount reserved on the owner's account to hold this item in storage.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 }
 
 #[test]
@@ -118,7 +118,7 @@ impl<Balance, AccountId> ExistenceReason<Balance, AccountId>
 where
 	AccountId: Clone,
 {
-	pub(crate) fn take_deposit(&mut self) -> Option<Balance> {
+	pub fn take_deposit(&mut self) -> Option<Balance> {
 		if !matches!(self, ExistenceReason::DepositHeld(_)) {
 			return None
 		}
@@ -131,7 +131,7 @@ where
 		}
 	}
 
-	pub(crate) fn take_deposit_from(&mut self) -> Option<(AccountId, Balance)> {
+	pub fn take_deposit_from(&mut self) -> Option<(AccountId, Balance)> {
 		if !matches!(self, ExistenceReason::DepositFrom(..)) {
 			return None
 		}
@@ -163,11 +163,11 @@ pub enum AccountStatus {
 }
 impl AccountStatus {
 	/// Returns `true` if frozen or blocked.
-	pub(crate) fn is_frozen(&self) -> bool {
+	pub fn is_frozen(&self) -> bool {
 		matches!(self, AccountStatus::Frozen | AccountStatus::Blocked)
 	}
 	/// Returns `true` if blocked.
-	pub(crate) fn is_blocked(&self) -> bool {
+	pub fn is_blocked(&self) -> bool {
 		matches!(self, AccountStatus::Blocked)
 	}
 }
@@ -178,13 +178,13 @@ pub struct AssetAccount<Balance, DepositBalance, Extra, AccountId> {
 	///
 	/// The part of the `balance` may be frozen by the [`Config::Freezer`]. The on-hold portion is
 	/// not included here and is tracked by the [`Config::Holder`].
-	pub(super) balance: Balance,
+	pub balance: Balance,
 	/// The status of the account.
-	pub(super) status: AccountStatus,
+	pub status: AccountStatus,
 	/// The reason for the existence of the account.
-	pub(super) reason: ExistenceReason<DepositBalance, AccountId>,
+	pub reason: ExistenceReason<DepositBalance, AccountId>,
 	/// Additional "sidecar" data, in case some other pallet wants to use this storage item.
-	pub(super) extra: Extra,
+	pub extra: Extra,
 }
 
 #[derive(Clone, Encode, Decode, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)]
@@ -192,15 +192,15 @@ pub struct AssetMetadata<DepositBalance, BoundedString> {
 	/// The balance deposited for this metadata.
 	///
 	/// This pays for the data stored in this struct.
-	pub(super) deposit: DepositBalance,
+	pub deposit: DepositBalance,
 	/// The user friendly name of this asset. Limited in length by `StringLimit`.
-	pub(super) name: BoundedString,
+	pub name: BoundedString,
 	/// The ticker symbol for this asset. Limited in length by `StringLimit`.
-	pub(super) symbol: BoundedString,
+	pub symbol: BoundedString,
 	/// The number of decimals this asset uses to represent one unit.
-	pub(super) decimals: u8,
+	pub decimals: u8,
 	/// Whether the asset metadata may be changed by a non Force origin.
-	pub(super) is_frozen: bool,
+	pub is_frozen: bool,
 }
 
 /// Trait for allowing a minimum balance on the account to be specified, beyond the
@@ -275,28 +275,28 @@ impl<AssetId, AccountId, Balance> BalanceOnHold<AssetId, AccountId, Balance> for
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
-pub(super) struct TransferFlags {
+pub struct TransferFlags {
 	/// The debited account must stay alive at the end of the operation; an error is returned if
 	/// this cannot be achieved legally.
-	pub(super) keep_alive: bool,
+	pub keep_alive: bool,
 	/// Less than the amount specified needs be debited by the operation for it to be considered
 	/// successful. If `false`, then the amount debited will always be at least the amount
 	/// specified.
-	pub(super) best_effort: bool,
+	pub best_effort: bool,
 	/// Any additional funds debited (due to minimum balance requirements) should be burned rather
 	/// than credited to the destination account.
-	pub(super) burn_dust: bool,
+	pub burn_dust: bool,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
-pub(super) struct DebitFlags {
+pub struct DebitFlags {
 	/// The debited account must stay alive at the end of the operation; an error is returned if
 	/// this cannot be achieved legally.
-	pub(super) keep_alive: bool,
+	pub keep_alive: bool,
 	/// Less than the amount specified needs be debited by the operation for it to be considered
 	/// successful. If `false`, then the amount debited will always be at least the amount
 	/// specified.
-	pub(super) best_effort: bool,
+	pub best_effort: bool,
 }
 
 impl From<TransferFlags> for DebitFlags {
diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs
index 220b39292b57598cdc1def77baef6874d7165c95..6883a07ad2891b216c988bb9b6f67cdb76fe6464 100644
--- a/substrate/frame/authority-discovery/src/lib.rs
+++ b/substrate/frame/authority-discovery/src/lib.rs
@@ -51,12 +51,12 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Keys of the current authority set.
-	pub(super) type Keys<T: Config> =
+	pub type Keys<T: Config> =
 		StorageValue<_, WeakBoundedVec<AuthorityId, T::MaxAuthorities>, ValueQuery>;
 
 	#[pallet::storage]
 	/// Keys of the next authority set.
-	pub(super) type NextKeys<T: Config> =
+	pub type NextKeys<T: Config> =
 		StorageValue<_, WeakBoundedVec<AuthorityId, T::MaxAuthorities>, ValueQuery>;
 
 	#[derive(frame_support::DefaultNoBound)]
@@ -210,6 +210,7 @@ mod tests {
 		type ValidatorId = AuthorityId;
 		type ValidatorIdOf = ConvertInto;
 		type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+		type DisablingStrategy = ();
 		type WeightInfo = ();
 	}
 
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index 1e4f51d5143098cd146fed6f35743482a6cfb1f1..ea977a547fee80ba6545c53221e9535d37ffbf16 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -100,12 +100,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs
index ae65cc0783c93aa58330bb75298d819b6b81d7b5..606b07b6e7b6f302b013cf2a9a9ec8738208e0b9 100644
--- a/substrate/frame/bags-list/src/lib.rs
+++ b/substrate/frame/bags-list/src/lib.rs
@@ -253,14 +253,14 @@ pub mod pallet {
 	///
 	/// Nodes store links forward and back within their respective bags.
 	#[pallet::storage]
-	pub(crate) type ListNodes<T: Config<I>, I: 'static = ()> =
+	pub type ListNodes<T: Config<I>, I: 'static = ()> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, list::Node<T, I>>;
 
 	/// A bag stored in storage.
 	///
 	/// Stores a `Bag` struct, which stores head and tail pointers to itself.
 	#[pallet::storage]
-	pub(crate) type ListBags<T: Config<I>, I: 'static = ()> =
+	pub type ListBags<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::Score, list::Bag<T, I>>;
 
 	#[pallet::event]
@@ -273,7 +273,6 @@ pub mod pallet {
 	}
 
 	#[pallet::error]
-	#[cfg_attr(test, derive(PartialEq))]
 	pub enum Error<T, I = ()> {
 		/// A error in the list interface implementation.
 		List(ListError),
diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs
index 6b0d1afcd8b2891a39fad53f91562e455c7d01a0..6bcc8efbd5d4a941ec6b25c36ac73d0d94d2fdd0 100644
--- a/substrate/frame/bags-list/src/list/mod.rs
+++ b/substrate/frame/bags-list/src/list/mod.rs
@@ -35,7 +35,7 @@ use frame_election_provider_support::ScoreProvider;
 use frame_support::{
 	defensive, ensure,
 	traits::{Defensive, DefensiveOption, Get},
-	DefaultNoBound, PalletError,
+	CloneNoBound, DefaultNoBound, EqNoBound, PalletError, PartialEqNoBound, RuntimeDebugNoBound,
 };
 use scale_info::TypeInfo;
 use sp_runtime::traits::{Bounded, Zero};
@@ -622,18 +622,27 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 /// desirable to ensure that there is some element of first-come, first-serve to the list's
 /// iteration so that there's no incentive to churn ids positioning to improve the chances of
 /// appearing within the ids set.
-#[derive(DefaultNoBound, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	DefaultNoBound,
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound())]
 #[scale_info(skip_type_params(T, I))]
-#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))]
 pub struct Bag<T: Config<I>, I: 'static = ()> {
-	head: Option<T::AccountId>,
-	tail: Option<T::AccountId>,
+	pub head: Option<T::AccountId>,
+	pub tail: Option<T::AccountId>,
 
 	#[codec(skip)]
-	bag_upper: T::Score,
+	pub bag_upper: T::Score,
 	#[codec(skip)]
-	_phantom: PhantomData<I>,
+	pub _phantom: PhantomData<I>,
 }
 
 impl<T: Config<I>, I: 'static> Bag<T, I> {
@@ -822,18 +831,26 @@ impl<T: Config<I>, I: 'static> Bag<T, I> {
 }
 
 /// A Node is the fundamental element comprising the doubly-linked list described by `Bag`.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+	RuntimeDebugNoBound,
+)]
 #[codec(mel_bound())]
 #[scale_info(skip_type_params(T, I))]
-#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound, Clone, PartialEq))]
 pub struct Node<T: Config<I>, I: 'static = ()> {
-	pub(crate) id: T::AccountId,
-	pub(crate) prev: Option<T::AccountId>,
-	pub(crate) next: Option<T::AccountId>,
-	pub(crate) bag_upper: T::Score,
-	pub(crate) score: T::Score,
+	pub id: T::AccountId,
+	pub prev: Option<T::AccountId>,
+	pub next: Option<T::AccountId>,
+	pub bag_upper: T::Score,
+	pub score: T::Score,
 	#[codec(skip)]
-	pub(crate) _phantom: PhantomData<I>,
+	pub _phantom: PhantomData<I>,
 }
 
 impl<T: Config<I>, I: 'static> Node<T, I> {
diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs
index 6756c618d706d2b0b6bc79560df954c93cdb54ea..aa6905306cd6a9b22d717dacbb53c998b3e70774 100644
--- a/substrate/frame/beefy-mmr/src/mock.rs
+++ b/substrate/frame/beefy-mmr/src/mock.rs
@@ -72,6 +72,7 @@ impl pallet_session::Config for Test {
 	type SessionManager = MockSessionManager;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 2f90edf3c358a65249bc1d3b053f458fd75e0bfa..655a36ab14612900b68b6d12e94155a388373f8a 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -184,12 +184,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <MockSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = MockSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs
index 3dd2ad24298d344d4122933546406fd3dc8c9fb9..fda97281f16bd64f941d9314a83032f02f22b173 100644
--- a/substrate/frame/conviction-voting/src/lib.rs
+++ b/substrate/frame/conviction-voting/src/lib.rs
@@ -68,9 +68,9 @@ pub type BlockNumberFor<T, I> =
 	<<T as Config<I>>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
 
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
-type BalanceOf<T, I = ()> =
+pub type BalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type VotingOf<T, I = ()> = Voting<
+pub type VotingOf<T, I = ()> = Voting<
 	BalanceOf<T, I>,
 	<T as frame_system::Config>::AccountId,
 	BlockNumberFor<T, I>,
@@ -82,10 +82,10 @@ type DelegatingOf<T, I = ()> =
 	Delegating<BalanceOf<T, I>, <T as frame_system::Config>::AccountId, BlockNumberFor<T, I>>;
 pub type TallyOf<T, I = ()> = Tally<BalanceOf<T, I>, <T as Config<I>>::MaxTurnout>;
 pub type VotesOf<T, I = ()> = BalanceOf<T, I>;
-type PollIndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
+pub type PollIndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
 #[cfg(feature = "runtime-benchmarks")]
-type IndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
-type ClassOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Class;
+pub type IndexOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Index;
+pub type ClassOf<T, I = ()> = <<T as Config<I>>::Polls as Polling<TallyOf<T, I>>>::Class;
 
 #[frame_support::pallet]
 pub mod pallet {
diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs
index 22ba63b26161d45efea8189e60d24bf0cbb9e04b..77ea937eac7a9519ef50971fa38cbbcb41bde6fe 100644
--- a/substrate/frame/core-fellowship/src/lib.rs
+++ b/substrate/frame/core-fellowship/src/lib.rs
@@ -241,17 +241,16 @@ pub mod pallet {
 
 	/// The overall status of the system.
 	#[pallet::storage]
-	pub(super) type Params<T: Config<I>, I: 'static = ()> =
-		StorageValue<_, ParamsOf<T, I>, ValueQuery>;
+	pub type Params<T: Config<I>, I: 'static = ()> = StorageValue<_, ParamsOf<T, I>, ValueQuery>;
 
 	/// The status of a claimant.
 	#[pallet::storage]
-	pub(super) type Member<T: Config<I>, I: 'static = ()> =
+	pub type Member<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, MemberStatusOf<T>, OptionQuery>;
 
 	/// Some evidence together with the desired outcome for which it was presented.
 	#[pallet::storage]
-	pub(super) type MemberEvidence<T: Config<I>, I: 'static = ()> =
+	pub type MemberEvidence<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, (Wish, Evidence<T, I>), OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
index 0dacfe9c55792f53e07fe708323ed6e0f8d5c784..fadc8d290d6f9e7ff05e718b57e0bdaa5e685e4a 100644
--- a/substrate/frame/delegated-staking/src/lib.rs
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -273,12 +273,12 @@ pub mod pallet {
 	/// Implementation note: We are not using a double map with `delegator` and `agent` account
 	/// as keys since we want to restrict delegators to delegate only to one account at a time.
 	#[pallet::storage]
-	pub(crate) type Delegators<T: Config> =
+	pub type Delegators<T: Config> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, Delegation<T>, OptionQuery>;
 
 	/// Map of `Agent` to their `Ledger`.
 	#[pallet::storage]
-	pub(crate) type Agents<T: Config> =
+	pub type Agents<T: Config> =
 		CountedStorageMap<_, Twox64Concat, T::AccountId, AgentLedger<T>, OptionQuery>;
 
 	// This pallet is not currently written with the intention of exposing any calls. But the
diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs
index 355f117bc4573d6fc5f96cd48b412edce9218aff..ea30fb239aee3ac986858e1bc1392fa947d75347 100644
--- a/substrate/frame/election-provider-multi-block/src/lib.rs
+++ b/substrate/frame/election-provider-multi-block/src/lib.rs
@@ -66,7 +66,7 @@
 //!
 //! ## Pagination
 //!
-//! Most of the external APIs of this pallet are paginated. All pagination follow a patter where if
+//! Most of the external APIs of this pallet are paginated. All pagination follow a pattern where if
 //! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`.
 //! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as
 //! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
index b1029e89fe85f65650fb5406314241c220cd2b28..fa64dd6f7d6ebd32998f93e95e2aba8b685fa916 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
@@ -170,7 +170,7 @@ fn mass_slash_doesnt_enter_emergency_phase() {
 		}
 
 		// Ensure no more than disabling limit of validators (default 1/3) is disabled
-		let disabling_limit = pallet_staking::UpToLimitWithReEnablingDisablingStrategy::<
+		let disabling_limit = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy::<
 			SLASHING_DISABLING_FACTOR,
 		>::disable_limit(active_set_size_before_slash);
 		assert!(disabled.len() == disabling_limit);
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
index 8c8de865600c0964bb69ef6fa530b2527b60794d..120deff96a75eb0d297efc9bc017b2946f39d86c 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
@@ -142,11 +142,14 @@ impl pallet_session::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Runtime>;
+	type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy<
+		SLASHING_DISABLING_FACTOR,
+	>;
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Runtime {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 frame_election_provider_support::generate_solution_type!(
@@ -335,8 +338,6 @@ impl pallet_staking::Config for Runtime {
 	type MaxUnlockingChunks = MaxUnlockingChunks;
 	type EventListeners = (Pools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
-	type DisablingStrategy =
-		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<SLASHING_DISABLING_FACTOR>;
 	type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
 }
 
@@ -908,10 +909,7 @@ pub(crate) fn on_offence_now(
 // Add offence to validator, slash it.
 pub(crate) fn add_slash(who: &AccountId) {
 	on_offence_now(
-		&[OffenceDetails {
-			offender: (*who, Staking::eras_stakers(active_era(), who)),
-			reporters: vec![],
-		}],
+		&[OffenceDetails { offender: (*who, ()), reporters: vec![] }],
 		&[Perbill::from_percent(10)],
 	);
 }
diff --git a/substrate/frame/fast-unstake/src/types.rs b/substrate/frame/fast-unstake/src/types.rs
index 2a2319ef61296a5781b9ff752d45da1aa7c4cbf6..518840a16a30367974d7ce4a85712081bb57dc83 100644
--- a/substrate/frame/fast-unstake/src/types.rs
+++ b/substrate/frame/fast-unstake/src/types.rs
@@ -20,7 +20,7 @@
 use crate::Config;
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
-	traits::Currency, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
+	traits::Currency, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
 };
 use scale_info::TypeInfo;
 use sp_staking::{EraIndex, StakingInterface};
@@ -39,14 +39,21 @@ impl<T: Config> frame_support::traits::Get<u32> for MaxChecking<T> {
 }
 
 #[docify::export]
-pub(crate) type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 /// An unstake request.
 ///
 /// This is stored in [`crate::Head`] storage item and points to the current unstake request that is
 /// being processed.
 #[derive(
-	Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen,
+	Encode,
+	Decode,
+	EqNoBound,
+	PartialEqNoBound,
+	CloneNoBound,
+	TypeInfo,
+	RuntimeDebugNoBound,
+	MaxEncodedLen,
 )]
 #[scale_info(skip_type_params(T))]
 pub struct UnstakeRequest<T: Config> {
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index a14bdc9d73b1f95d08df167f0f1394fadf48525a..482e767d32fc05faee88a866603ab5d48fa421b1 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -104,12 +104,13 @@ impl pallet_session::Config for Test {
 	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <TestSessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = TestSessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<u64, u128>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Self>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 impl pallet_authorship::Config for Test {
diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs
index a5d9a6e20e616945d00016944eea16d15b2d050d..4ccbde193147815e270c9023306e9b9d34232b76 100644
--- a/substrate/frame/im-online/src/mock.rs
+++ b/substrate/frame/im-online/src/mock.rs
@@ -127,6 +127,7 @@ impl pallet_session::Config for Runtime {
 	type Keys = UintAuthorityId;
 	type RuntimeEvent = RuntimeEvent;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs
index 869b4adc2adcea00c274711c4f5dee1228eaca70..2da5ab9c337f0dd7ca75ff8635a1077ae745e852 100644
--- a/substrate/frame/multisig/src/lib.rs
+++ b/substrate/frame/multisig/src/lib.rs
@@ -74,7 +74,7 @@ macro_rules! log {
 	};
 }
 
-type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 
 pub type BlockNumberFor<T> =
@@ -88,9 +88,9 @@ pub type BlockNumberFor<T> =
 )]
 pub struct Timepoint<BlockNumber> {
 	/// The height of the chain at the point in time.
-	height: BlockNumber,
+	pub height: BlockNumber,
 	/// The index of the extrinsic at the point in time.
-	index: u32,
+	pub index: u32,
 }
 
 /// An open multisig operation.
@@ -101,13 +101,13 @@ where
 	MaxApprovals: Get<u32>,
 {
 	/// The extrinsic when the multisig operation was opened.
-	when: Timepoint<BlockNumber>,
+	pub when: Timepoint<BlockNumber>,
 	/// The amount held in reserve of the `depositor`, to be returned once the operation ends.
-	deposit: Balance,
+	pub deposit: Balance,
 	/// The account who opened it (i.e. the first to approve it).
-	depositor: AccountId,
+	pub depositor: AccountId,
 	/// The approvals achieved so far, including the depositor. Always sorted.
-	approvals: BoundedVec<AccountId, MaxApprovals>,
+	pub approvals: BoundedVec<AccountId, MaxApprovals>,
 }
 
 type CallHash = [u8; 32];
@@ -157,7 +157,28 @@ pub mod pallet {
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: weights::WeightInfo;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliably block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index b45861289a561f2ec33b6f41e225384f83feda89..86545cdee8583c2e29984c9df792bceafc9bce9c 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -497,7 +497,8 @@ impl ClaimPermission {
 	TypeInfo,
 	RuntimeDebugNoBound,
 	CloneNoBound,
-	frame_support::PartialEqNoBound,
+	PartialEqNoBound,
+	EqNoBound,
 )]
 #[cfg_attr(feature = "std", derive(DefaultNoBound))]
 #[scale_info(skip_type_params(T))]
@@ -1295,8 +1296,17 @@ impl<T: Config> BondedPool<T> {
 /// A reward pool is not so much a pool anymore, since it does not contain any shares or points.
 /// Rather, simply to fit nicely next to bonded pool and unbonding pools in terms of terminology. In
 /// reality, a reward pool is just a container for a few pool-dependent data related to the rewards.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq, DefaultNoBound))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+	RuntimeDebugNoBound,
+)]
+#[cfg_attr(feature = "std", derive(DefaultNoBound))]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct RewardPool<T: Config> {
@@ -1304,19 +1314,19 @@ pub struct RewardPool<T: Config> {
 	///
 	/// This is updated ONLY when the points in the bonded pool change, which means `join`,
 	/// `bond_extra` and `unbond`, all of which is done through `update_recorded`.
-	last_recorded_reward_counter: T::RewardCounter,
+	pub last_recorded_reward_counter: T::RewardCounter,
 	/// The last recorded total payouts of the reward pool.
 	///
 	/// Payouts is essentially income of the pool.
 	///
 	/// Update criteria is same as that of `last_recorded_reward_counter`.
-	last_recorded_total_payouts: BalanceOf<T>,
+	pub last_recorded_total_payouts: BalanceOf<T>,
 	/// Total amount that this pool has paid out so far to the members.
-	total_rewards_claimed: BalanceOf<T>,
+	pub total_rewards_claimed: BalanceOf<T>,
 	/// The amount of commission pending to be claimed.
-	total_commission_pending: BalanceOf<T>,
+	pub total_commission_pending: BalanceOf<T>,
 	/// The amount of commission that has been claimed.
-	total_commission_claimed: BalanceOf<T>,
+	pub total_commission_claimed: BalanceOf<T>,
 }
 
 impl<T: Config> RewardPool<T> {
@@ -1455,15 +1465,24 @@ impl<T: Config> RewardPool<T> {
 }
 
 /// An unbonding pool. This is always mapped with an era.
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, DefaultNoBound, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	DefaultNoBound,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct UnbondPool<T: Config> {
 	/// The points in this pool.
-	points: BalanceOf<T>,
+	pub points: BalanceOf<T>,
 	/// The funds in the pool.
-	balance: BalanceOf<T>,
+	pub balance: BalanceOf<T>,
 }
 
 impl<T: Config> UnbondPool<T> {
@@ -1498,17 +1517,26 @@ impl<T: Config> UnbondPool<T> {
 	}
 }
 
-#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, DefaultNoBound, RuntimeDebugNoBound)]
-#[cfg_attr(feature = "std", derive(Clone, PartialEq))]
+#[derive(
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	TypeInfo,
+	DefaultNoBound,
+	RuntimeDebugNoBound,
+	CloneNoBound,
+	PartialEqNoBound,
+	EqNoBound,
+)]
 #[codec(mel_bound(T: Config))]
 #[scale_info(skip_type_params(T))]
 pub struct SubPools<T: Config> {
 	/// A general, era agnostic pool of funds that have fully unbonded. The pools
 	/// of `Self::with_era` will lazily be merged into into this pool if they are
 	/// older then `current_era - TotalUnbondingPools`.
-	no_era: UnbondPool<T>,
+	pub no_era: UnbondPool<T>,
 	/// Map of era in which a pool becomes unbonded in => unbond pools.
-	with_era: BoundedBTreeMap<EraIndex, UnbondPool<T>, TotalUnbondingPools<T>>,
+	pub with_era: BoundedBTreeMap<EraIndex, UnbondPool<T>, TotalUnbondingPools<T>>,
 }
 
 impl<T: Config> SubPools<T> {
diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs
index 3d3cd470bc24cc3fd63eaf09a52b99ee5eb7b33e..fa4349d1d94c848ff46435daa409d3c659e19388 100644
--- a/substrate/frame/offences/benchmarking/src/inner.rs
+++ b/substrate/frame/offences/benchmarking/src/inner.rs
@@ -170,6 +170,13 @@ fn make_offenders<T: Config>(
 	Ok(id_tuples)
 }
 
+#[cfg(test)]
+fn run_staking_next_block<T: Config>() {
+	use frame_support::traits::Hooks;
+	System::<T>::set_block_number(System::<T>::block_number().saturating_add(1u32.into()));
+	Staking::<T>::on_initialize(System::<T>::block_number());
+}
+
 #[cfg(test)]
 fn assert_all_slashes_applied<T>(offender_count: usize)
 where
@@ -182,10 +189,10 @@ where
 	// make sure that all slashes have been applied
 	// deposit to reporter + reporter account endowed.
 	assert_eq!(System::<T>::read_events_for_pallet::<pallet_balances::Event<T>>().len(), 2);
-	// (n nominators + one validator) * slashed + Slash Reported
+	// (n nominators + one validator) * slashed + Slash Reported + Slash Computed
 	assert_eq!(
 		System::<T>::read_events_for_pallet::<pallet_staking::Event<T>>().len(),
-		1 * (offender_count + 1) as usize + 1
+		1 * (offender_count + 1) as usize + 2
 	);
 	// offence
 	assert_eq!(System::<T>::read_events_for_pallet::<pallet_offences::Event>().len(), 1);
@@ -232,6 +239,8 @@ mod benchmarks {
 
 		#[cfg(test)]
 		{
+			// slashes applied at the next block.
+			run_staking_next_block::<T>();
 			assert_all_slashes_applied::<T>(n as usize);
 		}
 
@@ -266,6 +275,8 @@ mod benchmarks {
 		}
 		#[cfg(test)]
 		{
+			// slashes applied at the next block.
+			run_staking_next_block::<T>();
 			assert_all_slashes_applied::<T>(n as usize);
 		}
 
diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs
index 46a4e18c5e8fc23f5312833d59de27a88bd82b7f..63e440d9e004238e19af547cb7045c77355fe692 100644
--- a/substrate/frame/offences/benchmarking/src/mock.rs
+++ b/substrate/frame/offences/benchmarking/src/mock.rs
@@ -33,7 +33,6 @@ use sp_runtime::{
 };
 
 type AccountId = u64;
-type Balance = u64;
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Test {
@@ -54,8 +53,8 @@ impl pallet_timestamp::Config for Test {
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -95,6 +94,7 @@ impl pallet_session::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs
index 849ffddf4fb3c0c4e88fbf534a33312d0e2a83b9..744e2d18d67bca99e75136c17bf5a7f5b8586187 100644
--- a/substrate/frame/preimage/src/lib.rs
+++ b/substrate/frame/preimage/src/lib.rs
@@ -88,12 +88,12 @@ pub enum RequestStatus<AccountId, Ticket> {
 	Requested { maybe_ticket: Option<(AccountId, Ticket)>, count: u32, maybe_len: Option<u32> },
 }
 
-type BalanceOf<T> =
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type TicketOf<T> = <T as Config>::Consideration;
+pub type TicketOf<T> = <T as Config>::Consideration;
 
 /// Maximum size of preimage we can store is 4mb.
-const MAX_SIZE: u32 = 4 * 1024 * 1024;
+pub const MAX_SIZE: u32 = 4 * 1024 * 1024;
 /// Hard-limit on the number of hashes that can be passed to `ensure_updated`.
 ///
 /// Exists only for benchmarking purposes.
@@ -132,7 +132,7 @@ pub mod pallet {
 	pub struct Pallet<T>(_);
 
 	#[pallet::event]
-	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	#[pallet::generate_deposit(pub fn deposit_event)]
 	pub enum Event<T: Config> {
 		/// A preimage has been noted.
 		Noted { hash: T::Hash },
@@ -172,16 +172,16 @@ pub mod pallet {
 	/// The request status of a given hash.
 	#[deprecated = "RequestStatusFor"]
 	#[pallet::storage]
-	pub(super) type StatusFor<T: Config> =
+	pub type StatusFor<T: Config> =
 		StorageMap<_, Identity, T::Hash, OldRequestStatus<T::AccountId, BalanceOf<T>>>;
 
 	/// The request status of a given hash.
 	#[pallet::storage]
-	pub(super) type RequestStatusFor<T: Config> =
+	pub type RequestStatusFor<T: Config> =
 		StorageMap<_, Identity, T::Hash, RequestStatus<T::AccountId, TicketOf<T>>>;
 
 	#[pallet::storage]
-	pub(super) type PreimageFor<T: Config> =
+	pub type PreimageFor<T: Config> =
 		StorageMap<_, Identity, (T::Hash, u32), BoundedVec<u8, ConstU32<MAX_SIZE>>>;
 
 	#[pallet::call(weight = T::WeightInfo)]
diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs
index 1fe9772617221c069ccd71543ae1de8e4af0b4b2..594d1721cd41bcb08c7ed3238dad7c19bc225553 100644
--- a/substrate/frame/proxy/src/lib.rs
+++ b/substrate/frame/proxy/src/lib.rs
@@ -167,7 +167,28 @@ pub mod pallet {
 		#[pallet::constant]
 		type AnnouncementDepositFactor: Get<BalanceOf<Self>>;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliably block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index 42fb641983f6308a9809ca8ec856ee2d019ab569..8159bbefa76b1765d7986781887ac373ea694924 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -240,7 +240,28 @@ pub mod pallet {
 			+ GetDispatchInfo
 			+ From<frame_system::Call<Self>>;
 
-		/// Provider for the block number. Normally this is the `frame_system` pallet.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliably block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider;
 
 		/// The currency mechanism.
diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml
index b207a6041b9b6cf3230a713270c48d23eb5b4b8b..33d447e67a20369f2a1f18623a8a879bf654fce1 100644
--- a/substrate/frame/revive/rpc/Cargo.toml
+++ b/substrate/frame/revive/rpc/Cargo.toml
@@ -75,3 +75,6 @@ pretty_assertions = { workspace = true }
 static_init = { workspace = true }
 substrate-cli-test-utils = { workspace = true }
 subxt-signer = { workspace = true, features = ["unstable-eth"] }
+
+[build-dependencies]
+git2 = { version = "0.20.0", default-features = false }
diff --git a/substrate/frame/revive/rpc/build.rs b/substrate/frame/revive/rpc/build.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d2ea601211a00a6f0b44e5c4dc272f592cdb0a16
--- /dev/null
+++ b/substrate/frame/revive/rpc/build.rs
@@ -0,0 +1,44 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use std::process::Command;
+
+/// Get the current branch and commit hash.
+fn main() {
+	let output = Command::new("rustc")
+		.arg("--version")
+		.output()
+		.expect("cannot get the current rustc version");
+	// Exports the default rustc --version output:
+	// e.g. rustc 1.83.0 (90b35a623 2024-11-26)
+	// into the usual Ethereum web3_clientVersion format
+	// e.g. rustc1.83.0
+	let rustc_version = String::from_utf8_lossy(&output.stdout)
+		.split_whitespace()
+		.take(2)
+		.collect::<Vec<_>>()
+		.join("");
+	let target = std::env::var("TARGET").unwrap_or_else(|_| "unknown".to_string());
+
+	let repo = git2::Repository::open("../../../..").expect("should be a repository");
+	let head = repo.head().expect("should have head");
+	let commit = head.peel_to_commit().expect("should have commit");
+	let branch = head.shorthand().unwrap_or("unknown").to_string();
+	let id = &commit.id().to_string()[..7];
+	println!("cargo:rustc-env=GIT_REVISION={branch}-{id}");
+	println!("cargo:rustc-env=RUSTC_VERSION={rustc_version}");
+	println!("cargo:rustc-env=TARGET={target}");
+}
diff --git a/substrate/frame/revive/rpc/src/apis/execution_apis.rs b/substrate/frame/revive/rpc/src/apis/execution_apis.rs
index f55209fce585606a026430619ec9453c3163c234..b867e8acf30f06f764f4aea38c94048baa74ef9b 100644
--- a/substrate/frame/revive/rpc/src/apis/execution_apis.rs
+++ b/substrate/frame/revive/rpc/src/apis/execution_apis.rs
@@ -166,4 +166,8 @@ pub trait EthRpc {
 	/// The string value of current network id
 	#[method(name = "net_version")]
 	async fn net_version(&self) -> RpcResult<String>;
+
+	/// The string value of the current client version
+	#[method(name = "web3_clientVersion")]
+	async fn web3_client_version(&self) -> RpcResult<String>;
 }
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index 8d6797722d4f2bf4eb954832692ca9ff11b1d62c..31af6a5bbb0debf9a3941e579971e70c46068c78 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -352,4 +352,11 @@ impl EthRpcServer for EthRpcServerImpl {
 		let nonce = self.client.nonce(address, block).await?;
 		Ok(nonce)
 	}
+
+	async fn web3_client_version(&self) -> RpcResult<String> {
+		let git_revision = env!("GIT_REVISION");
+		let rustc_version = env!("RUSTC_VERSION");
+		let target = env!("TARGET");
+		Ok(format!("eth-rpc/{git_revision}/{target}/{rustc_version}"))
+	}
 }
diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs
index fd6ffc55e40c34d8e78fa879078c0be2d1cf86b1..8e91c4ecfd1cd1cf90cd28a3dd6e278001b332b4 100644
--- a/substrate/frame/root-offences/src/lib.rs
+++ b/substrate/frame/root-offences/src/lib.rs
@@ -31,7 +31,7 @@ extern crate alloc;
 
 use alloc::vec::Vec;
 use pallet_session::historical::IdentificationTuple;
-use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking};
+use pallet_staking::Pallet as Staking;
 use sp_runtime::Perbill;
 use sp_staking::offence::OnOffenceHandler;
 
@@ -49,11 +49,8 @@ pub mod pallet {
 		+ pallet_staking::Config
 		+ pallet_session::Config<ValidatorId = <Self as frame_system::Config>::AccountId>
 		+ pallet_session::historical::Config<
-			FullIdentification = Exposure<
-				<Self as frame_system::Config>::AccountId,
-				BalanceOf<Self>,
-			>,
-			FullIdentificationOf = ExposureOf<Self>,
+			FullIdentification = (),
+			FullIdentificationOf = pallet_staking::NullIdentity,
 		>
 	{
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
@@ -106,15 +103,11 @@ pub mod pallet {
 		fn get_offence_details(
 			offenders: Vec<(T::AccountId, Perbill)>,
 		) -> Result<Vec<OffenceDetails<T>>, DispatchError> {
-			let now = pallet_staking::ActiveEra::<T>::get()
-				.map(|e| e.index)
-				.ok_or(Error::<T>::FailedToGetActiveEra)?;
-
 			Ok(offenders
 				.clone()
 				.into_iter()
 				.map(|(o, _)| OffenceDetails::<T> {
-					offender: (o.clone(), Staking::<T>::eras_stakers(now, &o)),
+					offender: (o.clone(), ()),
 					reporters: Default::default(),
 				})
 				.collect())
@@ -124,7 +117,7 @@ pub mod pallet {
 		fn submit_offence(offenders: &[OffenceDetails<T>], slash_fraction: &[Perbill]) {
 			let session_index = <pallet_session::Pallet<T> as frame_support::traits::ValidatorSet<T::AccountId>>::session_index();
 
-			<pallet_staking::Pallet<T> as OnOffenceHandler<
+			<Staking<T> as OnOffenceHandler<
 				T::AccountId,
 				IdentificationTuple<T>,
 				Weight,
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index 2303221c8819a1440880ae24daccb893f4c8b50e..ce55bdcbdd3c4b0cca5523fd81953177a7649d3c 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -28,7 +28,7 @@ use frame_support::{
 	traits::{ConstU32, ConstU64, OneSessionHandler},
 	BoundedVec,
 };
-use pallet_staking::StakerStatus;
+use pallet_staking::{BalanceOf, StakerStatus};
 use sp_core::ConstBool;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
 use sp_staking::{EraIndex, SessionIndex};
@@ -148,8 +148,8 @@ impl pallet_staking::Config for Test {
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = pallet_staking::NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -167,6 +167,7 @@ impl pallet_session::Config for Test {
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
@@ -297,6 +298,11 @@ pub(crate) fn run_to_block(n: BlockNumber) {
 	);
 }
 
+/// Progress by n block.
+pub(crate) fn advance_blocks(n: u64) {
+	run_to_block(System::block_number() + n);
+}
+
 pub(crate) fn active_era() -> EraIndex {
 	pallet_staking::ActiveEra::<Test>::get().unwrap().index
 }
diff --git a/substrate/frame/root-offences/src/tests.rs b/substrate/frame/root-offences/src/tests.rs
index 289bb708efbbc0d9496499a2c9e8b25c240715f9..da6c49895bec1da8acff7baaf8bc20e82b4d5847 100644
--- a/substrate/frame/root-offences/src/tests.rs
+++ b/substrate/frame/root-offences/src/tests.rs
@@ -17,7 +17,10 @@
 
 use super::*;
 use frame_support::{assert_err, assert_ok};
-use mock::{active_era, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, Test as T};
+use mock::{
+	active_era, advance_blocks, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System,
+	Test as T,
+};
 use pallet_staking::asset;
 
 #[test]
@@ -42,6 +45,10 @@ fn create_offence_works_given_root_origin() {
 		assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone()));
 
 		System::assert_last_event(Event::OffenceCreated { offenders }.into());
+
+		// offence is processed in the following block.
+		advance_blocks(1);
+
 		// the slash should be applied right away.
 		assert_eq!(asset::staked::<T>(&11), 500);
 
@@ -66,6 +73,9 @@ fn create_offence_wont_slash_non_active_validators() {
 
 		System::assert_last_event(Event::OffenceCreated { offenders }.into());
 
+		// advance to the next block so offence gets processed.
+		advance_blocks(1);
+
 		// so 31 didn't get slashed.
 		assert_eq!(asset::staked::<T>(&31), 500);
 
diff --git a/substrate/frame/salary/src/lib.rs b/substrate/frame/salary/src/lib.rs
index 6a843625f4a7bc61901b890f79135c183188d354..45c711e21c109f9d897c3dfdd137881349824abc 100644
--- a/substrate/frame/salary/src/lib.rs
+++ b/substrate/frame/salary/src/lib.rs
@@ -136,12 +136,11 @@ pub mod pallet {
 
 	/// The overall status of the system.
 	#[pallet::storage]
-	pub(super) type Status<T: Config<I>, I: 'static = ()> =
-		StorageValue<_, StatusOf<T, I>, OptionQuery>;
+	pub type Status<T: Config<I>, I: 'static = ()> = StorageValue<_, StatusOf<T, I>, OptionQuery>;
 
 	/// The status of a claimant.
 	#[pallet::storage]
-	pub(super) type Claimant<T: Config<I>, I: 'static = ()> =
+	pub type Claimant<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, ClaimantStatusOf<T, I>, OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs
index 80ba7fd06da071a3c0fd96178d8d715dd7a2c150..2ad94ec04df47c24f9df6c13207b7263a26c825b 100644
--- a/substrate/frame/scheduler/src/lib.rs
+++ b/substrate/frame/scheduler/src/lib.rs
@@ -146,20 +146,20 @@ struct ScheduledV1<Call, BlockNumber> {
 }
 
 /// Information regarding an item to be executed in the future.
-#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
-#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
+#[derive(Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 pub struct Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId> {
 	/// The unique identity for this task, if there is one.
-	maybe_id: Option<Name>,
+	pub maybe_id: Option<Name>,
 	/// This task's priority.
-	priority: schedule::Priority,
+	pub priority: schedule::Priority,
 	/// The call to be dispatched.
-	call: Call,
+	pub call: Call,
 	/// If the call is periodic, then this points to the information concerning that.
-	maybe_periodic: Option<schedule::Period<BlockNumber>>,
+	pub maybe_periodic: Option<schedule::Period<BlockNumber>>,
 	/// The origin with which to dispatch the call.
-	origin: PalletsOrigin,
-	_phantom: PhantomData<AccountId>,
+	pub origin: PalletsOrigin,
+	#[doc(hidden)]
+	pub _phantom: PhantomData<AccountId>,
 }
 
 impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
@@ -351,7 +351,7 @@ pub mod pallet {
 	/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
 	/// identities.
 	#[pallet::storage]
-	pub(crate) type Lookup<T: Config> =
+	pub type Lookup<T: Config> =
 		StorageMap<_, Twox64Concat, TaskName, TaskAddress<BlockNumberFor<T>>>;
 
 	/// Events type.
diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs
index b0681f5aa000f7914f2147300457e8aee97ccd47..746c3b12e972b8fad920c7b4a552ecc89fe7d211 100644
--- a/substrate/frame/session/benchmarking/src/mock.rs
+++ b/substrate/frame/session/benchmarking/src/mock.rs
@@ -27,11 +27,11 @@ use frame_support::{
 	derive_impl, parameter_types,
 	traits::{ConstU32, ConstU64},
 };
+use pallet_staking::NullIdentity;
 use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId};
 
 type AccountId = u64;
 type Nonce = u32;
-type Balance = u64;
 
 type Block = frame_system::mocking::MockBlock<Test>;
 
@@ -68,8 +68,8 @@ impl pallet_timestamp::Config for Test {
 	type WeightInfo = ();
 }
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = pallet_staking::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = NullIdentity;
 }
 
 sp_runtime::impl_opaque_keys! {
@@ -104,6 +104,7 @@ impl pallet_session::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Test>;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 pallet_staking_reward_curve::build! {
diff --git a/substrate/frame/session/src/disabling.rs b/substrate/frame/session/src/disabling.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0780f95ae421936b17ecb1bbdbbb665f3b426f22
--- /dev/null
+++ b/substrate/frame/session/src/disabling.rs
@@ -0,0 +1,199 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use frame_support::defensive;
+/// Controls validator disabling
+pub trait DisablingStrategy<T: Config> {
+	/// Make a disabling decision. Returning a [`DisablingDecision`]
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision;
+}
+
+/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing
+/// `decision`
+///
+/// `disable` is the index of the validator to disable,
+/// `reenable` is the index of the validator to re-enable.
+#[derive(Debug)]
+pub struct DisablingDecision {
+	pub disable: Option<u32>,
+	pub reenable: Option<u32>,
+}
+
+impl<T: Config> DisablingStrategy<T> for () {
+	fn decision(
+		_offender_stash: &T::ValidatorId,
+		_offender_slash_severity: OffenceSeverity,
+		_currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		DisablingDecision { disable: None, reenable: None }
+	}
+}
+/// Calculate the disabling limit based on the number of validators and the disabling limit factor.
+///
+/// This is a sensible default implementation for the disabling limit factor for most disabling
+/// strategies.
+///
+/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled
+fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
+	validators_len
+		.saturating_sub(1)
+		.checked_div(disabling_limit_factor)
+		.unwrap_or_else(|| {
+			defensive!("DISABLING_LIMIT_FACTOR should not be 0");
+			0
+		})
+}
+
+/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables
+/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the
+/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than
+/// 1/3 of the validators in the active set can be disabled in an era.
+///
+/// By default a factor of 3 is used which is the byzantine threshold.
+pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
+
+impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
+	/// Disabling limit calculated from the total number of validators in the active set. When
+	/// reached no more validators will be disabled.
+	pub fn disable_limit(validators_len: usize) -> usize {
+		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
+	}
+}
+
+impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
+	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		_offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		let active_set = Validators::<T>::get();
+
+		// We don't disable more than the limit
+		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
+			log!(
+				debug,
+				"Won't disable: reached disabling limit {:?}",
+				Self::disable_limit(active_set.len())
+			);
+			return DisablingDecision { disable: None, reenable: None }
+		}
+
+		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
+			idx as u32
+		} else {
+			log!(debug, "Won't disable: offender not in active set",);
+			return DisablingDecision { disable: None, reenable: None }
+		};
+
+		log!(debug, "Will disable {:?}", offender_idx);
+
+		DisablingDecision { disable: Some(offender_idx), reenable: None }
+	}
+}
+
+/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
+/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher
+/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new
+/// offender.
+///
+/// This strategy is not based on cumulative severity of offences but only on the severity of the
+/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated
+/// the same as an offender committing 50% offence.
+///
+/// An extension of [`UpToLimitDisablingStrategy`].
+pub struct UpToLimitWithReEnablingDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
+
+impl<const DISABLING_LIMIT_FACTOR: usize>
+	UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	/// Disabling limit calculated from the total number of validators in the active set. When
+	/// reached re-enabling logic might kick in.
+	pub fn disable_limit(validators_len: usize) -> usize {
+		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
+	}
+}
+
+impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
+	for UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	fn decision(
+		offender_stash: &T::ValidatorId,
+		offender_slash_severity: OffenceSeverity,
+		currently_disabled: &Vec<(u32, OffenceSeverity)>,
+	) -> DisablingDecision {
+		let active_set = Validators::<T>::get();
+
+		// We don't disable validators that are not in the active set
+		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
+			idx as u32
+		} else {
+			log!(debug, "Won't disable: offender not in active set",);
+			return DisablingDecision { disable: None, reenable: None }
+		};
+
+		// Check if offender is already disabled
+		if let Some((_, old_severity)) =
+			currently_disabled.iter().find(|(idx, _)| *idx == offender_idx)
+		{
+			if offender_slash_severity > *old_severity {
+				log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx);
+				return DisablingDecision { disable: Some(offender_idx), reenable: None };
+			} else {
+				log!(debug, "Offender already disabled with higher or equal severity");
+				return DisablingDecision { disable: None, reenable: None };
+			}
+		}
+
+		// We don't disable more than the limit (but we can re-enable a smaller offender to make
+		// space)
+		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
+			log!(
+				debug,
+				"Reached disabling limit {:?}, checking for re-enabling",
+				Self::disable_limit(active_set.len())
+			);
+
+			// Find the smallest offender to re-enable that is not higher than
+			// offender_slash_severity
+			if let Some((smallest_idx, _)) = currently_disabled
+				.iter()
+				.filter(|(_, severity)| *severity <= offender_slash_severity)
+				.min_by_key(|(_, severity)| *severity)
+			{
+				log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx);
+				return DisablingDecision {
+					disable: Some(offender_idx),
+					reenable: Some(*smallest_idx),
+				}
+			} else {
+				log!(debug, "No smaller offender found to re-enable");
+				return DisablingDecision { disable: None, reenable: None }
+			}
+		} else {
+			// If we are not at the limit, just disable the new offender and dont re-enable anyone
+			log!(debug, "Will disable {:?}", offender_idx);
+			return DisablingDecision { disable: Some(offender_idx), reenable: None }
+		}
+	}
+}
diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs
index 98ce774e428154e0752f553d4dbfddb438a0b4f2..a80a2b235757bab833397289a5753402a6cd3b93 100644
--- a/substrate/frame/session/src/lib.rs
+++ b/substrate/frame/session/src/lib.rs
@@ -106,6 +106,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+pub mod disabling;
 #[cfg(feature = "historical")]
 pub mod historical;
 pub mod migrations;
@@ -123,6 +124,7 @@ use core::{
 	marker::PhantomData,
 	ops::{Rem, Sub},
 };
+use disabling::DisablingStrategy;
 use frame_support::{
 	dispatch::DispatchResult,
 	ensure,
@@ -136,13 +138,26 @@ use frame_support::{
 use frame_system::pallet_prelude::BlockNumberFor;
 use sp_runtime::{
 	traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero},
-	ConsensusEngineId, DispatchError, KeyTypeId, Permill, RuntimeAppPublic,
+	ConsensusEngineId, DispatchError, KeyTypeId, Perbill, Permill, RuntimeAppPublic,
 };
-use sp_staking::SessionIndex;
+use sp_staking::{offence::OffenceSeverity, SessionIndex};
 
 pub use pallet::*;
 pub use weights::WeightInfo;
 
+pub(crate) const LOG_TARGET: &str = "runtime::session";
+
+// syntactic sugar for logging.
+#[macro_export]
+macro_rules! log {
+	($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
+		log::$level!(
+			target: crate::LOG_TARGET,
+			concat!("[{:?}] 💸 ", $patter), <frame_system::Pallet<T>>::block_number() $(, $values)*
+		)
+	};
+}
+
 /// Decides whether the session should be ended.
 pub trait ShouldEndSession<BlockNumber> {
 	/// Return `true` if the session should be ended.
@@ -375,7 +390,7 @@ pub mod pallet {
 	use frame_system::pallet_prelude::*;
 
 	/// The in-code storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 
 	#[pallet::pallet]
 	#[pallet::storage_version(STORAGE_VERSION)]
@@ -385,7 +400,7 @@ pub mod pallet {
 	#[pallet::config]
 	pub trait Config: frame_system::Config {
 		/// The overarching event type.
-		type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 
 		/// A stable ID for a validator.
 		type ValidatorId: Member
@@ -416,6 +431,9 @@ pub mod pallet {
 		/// The keys.
 		type Keys: OpaqueKeys + Member + Parameter + MaybeSerializeDeserialize;
 
+		/// `DisablingStragegy` controls how validators are disabled
+		type DisablingStrategy: DisablingStrategy<Self>;
+
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: WeightInfo;
 	}
@@ -518,7 +536,7 @@ pub mod pallet {
 	/// disabled using binary search. It gets cleared when `on_session_ending` returns
 	/// a new set of identities.
 	#[pallet::storage]
-	pub type DisabledValidators<T> = StorageValue<_, Vec<u32>, ValueQuery>;
+	pub type DisabledValidators<T> = StorageValue<_, Vec<(u32, OffenceSeverity)>, ValueQuery>;
 
 	/// The next session keys for a validator.
 	#[pallet::storage]
@@ -532,10 +550,14 @@ pub mod pallet {
 
 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
-	pub enum Event {
+	pub enum Event<T: Config> {
 		/// New session has happened. Note that the argument is the session index, not the
 		/// block number as the type might suggest.
 		NewSession { session_index: SessionIndex },
+		/// Validator has been disabled.
+		ValidatorDisabled { validator: T::ValidatorId },
+		/// Validator has been re-enabled.
+		ValidatorReenabled { validator: T::ValidatorId },
 	}
 
 	/// Error for the session pallet.
@@ -631,7 +653,7 @@ impl<T: Config> Pallet<T> {
 
 	/// Public function to access the disabled validators.
 	pub fn disabled_validators() -> Vec<u32> {
-		DisabledValidators::<T>::get()
+		DisabledValidators::<T>::get().iter().map(|(i, _)| *i).collect()
 	}
 
 	/// Move on to next session. Register new validator set and session keys. Changes to the
@@ -644,7 +666,7 @@ impl<T: Config> Pallet<T> {
 		// Inform the session handlers that a session is going to end.
 		T::SessionHandler::on_before_session_ending();
 		T::SessionManager::end_session(session_index);
-		log::trace!(target: "runtime::session", "ending_session {:?}", session_index);
+		log!(trace, "ending_session {:?}", session_index);
 
 		// Get queued session keys and validators.
 		let session_keys = QueuedKeys::<T>::get();
@@ -724,14 +746,16 @@ impl<T: Config> Pallet<T> {
 	}
 
 	/// Disable the validator of index `i`, returns `false` if the validator was already disabled.
+	///
+	/// Note: This sets the OffenceSeverity to the lowest value.
 	pub fn disable_index(i: u32) -> bool {
-		if i >= Validators::<T>::decode_len().unwrap_or(0) as u32 {
+		if i >= Validators::<T>::decode_len().defensive_unwrap_or(0) as u32 {
 			return false
 		}
 
 		DisabledValidators::<T>::mutate(|disabled| {
-			if let Err(index) = disabled.binary_search(&i) {
-				disabled.insert(index, i);
+			if let Err(index) = disabled.binary_search_by_key(&i, |(index, _)| *index) {
+				disabled.insert(index, (i, OffenceSeverity(Perbill::zero())));
 				T::SessionHandler::on_disabled(i);
 				return true
 			}
@@ -740,23 +764,6 @@ impl<T: Config> Pallet<T> {
 		})
 	}
 
-	/// Re-enable the validator of index `i`, returns `false` if the validator was already enabled.
-	pub fn enable_index(i: u32) -> bool {
-		if i >= Validators::<T>::decode_len().defensive_unwrap_or(0) as u32 {
-			return false
-		}
-
-		// If the validator is not disabled, return false.
-		DisabledValidators::<T>::mutate(|disabled| {
-			if let Ok(index) = disabled.binary_search(&i) {
-				disabled.remove(index);
-				true
-			} else {
-				false
-			}
-		})
-	}
-
 	/// Disable the validator identified by `c`. (If using with the staking pallet,
 	/// this would be their *stash* account.)
 	///
@@ -920,6 +927,47 @@ impl<T: Config> Pallet<T> {
 	fn clear_key_owner(id: KeyTypeId, key_data: &[u8]) {
 		KeyOwner::<T>::remove((id, key_data));
 	}
+
+	pub fn report_offence(validator: T::ValidatorId, severity: OffenceSeverity) {
+		DisabledValidators::<T>::mutate(|disabled| {
+			let decision = T::DisablingStrategy::decision(&validator, severity, &disabled);
+
+			if let Some(offender_idx) = decision.disable {
+				// Check if the offender is already disabled
+				match disabled.binary_search_by_key(&offender_idx, |(index, _)| *index) {
+					// Offender is already disabled, update severity if the new one is higher
+					Ok(index) => {
+						let (_, old_severity) = &mut disabled[index];
+						if severity > *old_severity {
+							*old_severity = severity;
+						}
+					},
+					Err(index) => {
+						// Offender is not disabled, add to `DisabledValidators` and disable it
+						disabled.insert(index, (offender_idx, severity));
+						// let the session handlers know that a validator got disabled
+						T::SessionHandler::on_disabled(offender_idx);
+
+						// Emit event that a validator got disabled
+						Self::deposit_event(Event::ValidatorDisabled {
+							validator: validator.clone(),
+						});
+					},
+				}
+			}
+
+			if let Some(reenable_idx) = decision.reenable {
+				// Remove the validator from `DisabledValidators` and re-enable it.
+				if let Ok(index) = disabled.binary_search_by_key(&reenable_idx, |(index, _)| *index)
+				{
+					disabled.remove(index);
+					// Emit event that a validator got re-enabled
+					let reenabled_stash = Validators::<T>::get()[reenable_idx as usize].clone();
+					Self::deposit_event(Event::ValidatorReenabled { validator: reenabled_stash });
+				}
+			}
+		});
+	}
 }
 
 impl<T: Config> ValidatorRegistration<T::ValidatorId> for Pallet<T> {
@@ -955,11 +1003,11 @@ impl<T: Config> EstimateNextNewSession<BlockNumberFor<T>> for Pallet<T> {
 
 impl<T: Config> frame_support::traits::DisabledValidators for Pallet<T> {
 	fn is_disabled(index: u32) -> bool {
-		DisabledValidators::<T>::get().binary_search(&index).is_ok()
+		DisabledValidators::<T>::get().binary_search_by_key(&index, |(i, _)| *i).is_ok()
 	}
 
 	fn disabled_validators() -> Vec<u32> {
-		DisabledValidators::<T>::get()
+		Self::disabled_validators()
 	}
 }
 
diff --git a/substrate/frame/session/src/migrations/historical.rs b/substrate/frame/session/src/migrations/historical.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b6838099837a00a7f440cf8229fbdc2c9bd5b896
--- /dev/null
+++ b/substrate/frame/session/src/migrations/historical.rs
@@ -0,0 +1,196 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use core::str;
+use sp_io::hashing::twox_128;
+
+use frame_support::{
+	storage::{generator::StorageValue, StoragePrefixedMap},
+	traits::{
+		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
+		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+	},
+	weights::Weight,
+};
+
+use crate::historical as pallet_session_historical;
+
+const LOG_TARGET: &str = "runtime::session_historical";
+
+const OLD_PREFIX: &str = "Session";
+
+/// Migrate the entire storage of this pallet to a new prefix.
+///
+/// This new prefix must be the same as the one set in construct_runtime.
+///
+/// The migration will look into the storage version in order not to trigger a migration on an up
+/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
+/// migration.
+pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
+) -> Weight {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	if new_pallet_name == OLD_PREFIX {
+		log::info!(
+			target: LOG_TARGET,
+			"New pallet name is equal to the old prefix. No migration needs to be done.",
+		);
+		return Weight::zero()
+	}
+
+	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
+	log::info!(
+		target: LOG_TARGET,
+		"Running migration to v1 for session_historical with storage version {:?}",
+		on_chain_storage_version,
+	);
+
+	if on_chain_storage_version < 1 {
+		let storage_prefix = pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		let storage_prefix = pallet_session_historical::StoredRange::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		StorageVersion::new(1).put::<P>();
+		<T as frame_system::Config>::BlockWeights::get().max_block
+	} else {
+		log::warn!(
+			target: LOG_TARGET,
+			"Attempted to apply migration to v1 but failed because storage version is {:?}",
+			on_chain_storage_version,
+		);
+		Weight::zero()
+	}
+}
+
+/// Some checks prior to migration. This can be linked to
+/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn pre_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
+	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+
+	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|key| Ok(key.to_vec()),
+	);
+
+	// Ensure nothing except the storage_version_key is stored in the new prefix.
+	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+
+	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
+}
+
+/// Some checks for after migration. This can be linked to
+/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn post_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration(
+		"post-migration",
+		storage_prefix_historical_sessions,
+		OLD_PREFIX,
+		new_pallet_name,
+	);
+	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	// Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix.
+	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
+	let old_historical_sessions_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_historical_sessions)[..]].concat();
+	let old_historical_sessions_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_historical_sessions_key.to_vec(),
+		old_historical_sessions_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_historical_sessions_key_iter.count(), 0);
+
+	let old_stored_range_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_stored_range)[..]].concat();
+	let old_stored_range_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_stored_range_key.to_vec(),
+		old_stored_range_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_stored_range_key_iter.count(), 0);
+
+	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
+	// moved to the new prefix.
+	// NOTE: storage_version_key is already in the new prefix.
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|_| Ok(()),
+	);
+	assert!(new_pallet_prefix_iter.count() >= 1);
+
+	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
+}
+
+fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
+	log::info!(
+		target: LOG_TARGET,
+		"{} prefix of storage '{}': '{}' ==> '{}'",
+		stage,
+		str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>"),
+		old_pallet_name,
+		new_pallet_name,
+	);
+}
diff --git a/substrate/frame/session/src/migrations/mod.rs b/substrate/frame/session/src/migrations/mod.rs
index 3b15d0ac4646abaef577e136fd35a4d1a8840344..730dd9c69edce5c2a4eaa79939f95c090dca1147 100644
--- a/substrate/frame/session/src/migrations/mod.rs
+++ b/substrate/frame/session/src/migrations/mod.rs
@@ -21,4 +21,5 @@
 /// In version 1 it uses its name as configured in `construct_runtime`.
 /// This migration moves session historical pallet storages from old prefix to new prefix.
 #[cfg(feature = "historical")]
+pub mod historical;
 pub mod v1;
diff --git a/substrate/frame/session/src/migrations/v1.rs b/substrate/frame/session/src/migrations/v1.rs
index b6838099837a00a7f440cf8229fbdc2c9bd5b896..bac0af6fe6b0f604eb1eecb898d68c33f62da282 100644
--- a/substrate/frame/session/src/migrations/v1.rs
+++ b/substrate/frame/session/src/migrations/v1.rs
@@ -15,182 +15,93 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use core::str;
-use sp_io::hashing::twox_128;
-
+use crate::{Config, DisabledValidators as NewDisabledValidators, Pallet, Perbill, Vec};
 use frame_support::{
-	storage::{generator::StorageValue, StoragePrefixedMap},
-	traits::{
-		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
-		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
-	},
-	weights::Weight,
+	pallet_prelude::{Get, ValueQuery, Weight},
+	traits::UncheckedOnRuntimeUpgrade,
 };
+use sp_staking::offence::OffenceSeverity;
 
-use crate::historical as pallet_session_historical;
+#[cfg(feature = "try-runtime")]
+use sp_runtime::TryRuntimeError;
 
-const LOG_TARGET: &str = "runtime::session_historical";
+#[cfg(feature = "try-runtime")]
+use frame_support::ensure;
+use frame_support::migrations::VersionedMigration;
 
-const OLD_PREFIX: &str = "Session";
+/// This is the storage getting migrated.
+#[frame_support::storage_alias]
+type DisabledValidators<T: Config> = StorageValue<Pallet<T>, Vec<u32>, ValueQuery>;
 
-/// Migrate the entire storage of this pallet to a new prefix.
-///
-/// This new prefix must be the same as the one set in construct_runtime.
-///
-/// The migration will look into the storage version in order not to trigger a migration on an up
-/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
-/// migration.
-pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
-) -> Weight {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
+pub trait MigrateDisabledValidators {
+	/// Peek the list of disabled validators and their offence severity.
+	#[cfg(feature = "try-runtime")]
+	fn peek_disabled() -> Vec<(u32, OffenceSeverity)>;
 
-	if new_pallet_name == OLD_PREFIX {
-		log::info!(
-			target: LOG_TARGET,
-			"New pallet name is equal to the old prefix. No migration needs to be done.",
-		);
-		return Weight::zero()
-	}
+	/// Return the list of disabled validators and their offence severity, removing them from the
+	/// underlying storage.
+	fn take_disabled() -> Vec<(u32, OffenceSeverity)>;
+}
 
-	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
-	log::info!(
-		target: LOG_TARGET,
-		"Running migration to v1 for session_historical with storage version {:?}",
-		on_chain_storage_version,
-	);
-
-	if on_chain_storage_version < 1 {
-		let storage_prefix = pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-		frame_support::storage::migration::move_storage_from_pallet(
-			storage_prefix,
-			OLD_PREFIX.as_bytes(),
-			new_pallet_name.as_bytes(),
-		);
-		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+pub struct InitOffenceSeverity<T>(core::marker::PhantomData<T>);
+impl<T: Config> MigrateDisabledValidators for InitOffenceSeverity<T> {
+	#[cfg(feature = "try-runtime")]
+	fn peek_disabled() -> Vec<(u32, OffenceSeverity)> {
+		DisabledValidators::<T>::get()
+			.iter()
+			.map(|v| (*v, OffenceSeverity(Perbill::zero())))
+			.collect::<Vec<_>>()
+	}
 
-		let storage_prefix = pallet_session_historical::StoredRange::<T>::storage_prefix();
-		frame_support::storage::migration::move_storage_from_pallet(
-			storage_prefix,
-			OLD_PREFIX.as_bytes(),
-			new_pallet_name.as_bytes(),
-		);
-		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
-
-		StorageVersion::new(1).put::<P>();
-		<T as frame_system::Config>::BlockWeights::get().max_block
-	} else {
-		log::warn!(
-			target: LOG_TARGET,
-			"Attempted to apply migration to v1 but failed because storage version is {:?}",
-			on_chain_storage_version,
-		);
-		Weight::zero()
+	fn take_disabled() -> Vec<(u32, OffenceSeverity)> {
+		DisabledValidators::<T>::take()
+			.iter()
+			.map(|v| (*v, OffenceSeverity(Perbill::zero())))
+			.collect::<Vec<_>>()
 	}
 }
-
-/// Some checks prior to migration. This can be linked to
-/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing.
-///
-/// Panics if anything goes wrong.
-pub fn pre_migrate<
-	T: pallet_session_historical::Config,
-	P: GetStorageVersion + PalletInfoAccess,
->() {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
-
-	let storage_prefix_historical_sessions =
-		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
-
-	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
-	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
-
-	if new_pallet_name == OLD_PREFIX {
-		return
+pub struct VersionUncheckedMigrateV0ToV1<T, S: MigrateDisabledValidators>(
+	core::marker::PhantomData<(T, S)>,
+);
+
+impl<T: Config, S: MigrateDisabledValidators> UncheckedOnRuntimeUpgrade
+	for VersionUncheckedMigrateV0ToV1<T, S>
+{
+	fn on_runtime_upgrade() -> Weight {
+		let disabled = S::take_disabled();
+		NewDisabledValidators::<T>::put(disabled);
+
+		T::DbWeight::get().reads_writes(1, 1)
 	}
 
-	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
-	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
+		let source_disabled = S::peek_disabled().iter().map(|(v, _s)| *v).collect::<Vec<_>>();
+		let existing_disabled = DisabledValidators::<T>::get();
 
-	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
-		new_pallet_prefix.to_vec(),
-		new_pallet_prefix.to_vec(),
-		|key| Ok(key.to_vec()),
-	);
+		ensure!(source_disabled == existing_disabled, "Disabled validators mismatch");
+		ensure!(
+			NewDisabledValidators::<T>::get().len() == crate::Validators::<T>::get().len(),
+			"Disabled validators mismatch"
+		);
+		Ok(Vec::new())
+	}
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
+		let validators_max_index = crate::Validators::<T>::get().len() as u32 - 1;
 
-	// Ensure nothing except the storage_version_key is stored in the new prefix.
-	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+		for (v, _s) in NewDisabledValidators::<T>::get() {
+			ensure!(v <= validators_max_index, "Disabled validator index out of bounds");
+		}
 
-	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
-}
-
-/// Some checks for after migration. This can be linked to
-/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing.
-///
-/// Panics if anything goes wrong.
-pub fn post_migrate<
-	T: pallet_session_historical::Config,
-	P: GetStorageVersion + PalletInfoAccess,
->() {
-	let new_pallet_name = <P as PalletInfoAccess>::name();
-
-	let storage_prefix_historical_sessions =
-		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
-	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
-
-	log_migration(
-		"post-migration",
-		storage_prefix_historical_sessions,
-		OLD_PREFIX,
-		new_pallet_name,
-	);
-	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
-
-	if new_pallet_name == OLD_PREFIX {
-		return
+		Ok(())
 	}
-
-	// Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix.
-	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
-	let old_historical_sessions_key =
-		[&old_pallet_prefix, &twox_128(storage_prefix_historical_sessions)[..]].concat();
-	let old_historical_sessions_key_iter = frame_support::storage::KeyPrefixIterator::new(
-		old_historical_sessions_key.to_vec(),
-		old_historical_sessions_key.to_vec(),
-		|_| Ok(()),
-	);
-	assert_eq!(old_historical_sessions_key_iter.count(), 0);
-
-	let old_stored_range_key =
-		[&old_pallet_prefix, &twox_128(storage_prefix_stored_range)[..]].concat();
-	let old_stored_range_key_iter = frame_support::storage::KeyPrefixIterator::new(
-		old_stored_range_key.to_vec(),
-		old_stored_range_key.to_vec(),
-		|_| Ok(()),
-	);
-	assert_eq!(old_stored_range_key_iter.count(), 0);
-
-	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
-	// moved to the new prefix.
-	// NOTE: storage_version_key is already in the new prefix.
-	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
-	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
-		new_pallet_prefix.to_vec(),
-		new_pallet_prefix.to_vec(),
-		|_| Ok(()),
-	);
-	assert!(new_pallet_prefix_iter.count() >= 1);
-
-	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
 }
 
-fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
-	log::info!(
-		target: LOG_TARGET,
-		"{} prefix of storage '{}': '{}' ==> '{}'",
-		stage,
-		str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>"),
-		old_pallet_name,
-		new_pallet_name,
-	);
-}
+pub type MigrateV0ToV1<T, S> = VersionedMigration<
+	0,
+	1,
+	VersionUncheckedMigrateV0ToV1<T, S>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs
index 745b57d1be41fa13c65c1494d26f2b685666270c..ac8f9d320d75a8baaf06cc38f0cc0646decc28f6 100644
--- a/substrate/frame/session/src/mock.rs
+++ b/substrate/frame/session/src/mock.rs
@@ -248,6 +248,10 @@ impl Convert<u64, Option<u64>> for TestValidatorIdOf {
 	}
 }
 
+// Disabling threshold for `UpToLimitDisablingStrategy` and
+// `UpToLimitWithReEnablingDisablingStrategy``
+pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3;
+
 impl Config for Test {
 	type ShouldEndSession = TestShouldEndSession;
 	#[cfg(feature = "historical")]
@@ -260,6 +264,8 @@ impl Config for Test {
 	type Keys = MockSessionKeys;
 	type RuntimeEvent = RuntimeEvent;
 	type NextSessionRotation = ();
+	type DisablingStrategy =
+		disabling::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type WeightInfo = ();
 }
 
diff --git a/substrate/frame/session/src/tests.rs b/substrate/frame/session/src/tests.rs
index f392c2ab7663c73d4b17443d7f9918ca410eceb8..42aeb8e14c36447b8a6fb1421bd9a78a68b59138 100644
--- a/substrate/frame/session/src/tests.rs
+++ b/substrate/frame/session/src/tests.rs
@@ -477,8 +477,8 @@ fn test_migration_v1() {
 		);
 		StorageVersion::new(0).put::<Historical>();
 
-		crate::migrations::v1::pre_migrate::<Test, Historical>();
-		crate::migrations::v1::migrate::<Test, Historical>();
-		crate::migrations::v1::post_migrate::<Test, Historical>();
+		crate::migrations::historical::pre_migrate::<Test, Historical>();
+		crate::migrations::historical::migrate::<Test, Historical>();
+		crate::migrations::historical::post_migrate::<Test, Historical>();
 	});
 }
diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs
index 39aa6bf3566b2950248254fea242154a1ccebfb7..03726e6215ca1d0a71fc703e27f49ed3a0fdf85d 100644
--- a/substrate/frame/society/src/lib.rs
+++ b/substrate/frame/society/src/lib.rs
@@ -658,7 +658,7 @@ pub mod pallet {
 
 	/// The max number of members for the society at one time.
 	#[pallet::storage]
-	pub(super) type Parameters<T: Config<I>, I: 'static = ()> =
+	pub type Parameters<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, GroupParamsFor<T, I>, OptionQuery>;
 
 	/// Amount of our account balance that is specifically for the next round's bid(s).
@@ -709,7 +709,7 @@ pub mod pallet {
 
 	/// The current bids, stored ordered by the value of the bid.
 	#[pallet::storage]
-	pub(super) type Bids<T: Config<I>, I: 'static = ()> =
+	pub type Bids<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, BoundedVec<Bid<T::AccountId, BalanceOf<T, I>>, T::MaxBids>, ValueQuery>;
 
 	#[pallet::storage]
@@ -727,7 +727,7 @@ pub mod pallet {
 
 	/// Double map from Candidate -> Voter -> (Maybe) Vote.
 	#[pallet::storage]
-	pub(super) type Votes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type Votes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Twox64Concat,
 		T::AccountId,
@@ -739,7 +739,7 @@ pub mod pallet {
 
 	/// Clear-cursor for Vote, map from Candidate -> (Maybe) Cursor.
 	#[pallet::storage]
-	pub(super) type VoteClearCursor<T: Config<I>, I: 'static = ()> =
+	pub type VoteClearCursor<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Twox64Concat, T::AccountId, BoundedVec<u8, KeyLenOf<Votes<T, I>>>>;
 
 	/// At the end of the claim period, this contains the most recently approved members (along with
@@ -751,17 +751,17 @@ pub mod pallet {
 
 	/// The number of challenge rounds there have been. Used to identify stale DefenderVotes.
 	#[pallet::storage]
-	pub(super) type ChallengeRoundCount<T: Config<I>, I: 'static = ()> =
+	pub type ChallengeRoundCount<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, RoundIndex, ValueQuery>;
 
 	/// The defending member currently being challenged, along with a running tally of votes.
 	#[pallet::storage]
-	pub(super) type Defending<T: Config<I>, I: 'static = ()> =
+	pub type Defending<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, (T::AccountId, T::AccountId, Tally)>;
 
 	/// Votes for the defender, keyed by challenge round.
 	#[pallet::storage]
-	pub(super) type DefenderVotes<T: Config<I>, I: 'static = ()> =
+	pub type DefenderVotes<T: Config<I>, I: 'static = ()> =
 		StorageDoubleMap<_, Twox64Concat, RoundIndex, Twox64Concat, T::AccountId, Vote>;
 
 	#[pallet::hooks]
diff --git a/substrate/frame/staking/ah-client/Cargo.toml b/substrate/frame/staking/ah-client/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..4c41380e48eddc7cc29cf429eb43fb76da948436
--- /dev/null
+++ b/substrate/frame/staking/ah-client/Cargo.toml
@@ -0,0 +1,66 @@
+[package]
+name = "pallet-staking-ah-client"
+description = "Pallet handling the communication with staking-rc-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way."
+license = "Apache-2.0"
+version = "0.1.0"
+edition.workspace = true
+authors.workspace = true
+repository.workspace = true
+
+[dependencies]
+codec = { workspace = true, features = ["derive"] }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+log = { workspace = true }
+pallet-authorship = { workspace = true }
+pallet-session = { features = ["historical"], workspace = true }
+pallet-staking = { workspace = true }
+pallet-staking-rc-client = { workspace = true }
+polkadot-primitives = { workspace = true }
+polkadot-runtime-parachains = { workspace = true }
+scale-info = { workspace = true, features = ["derive"] }
+sp-core = { workspace = true }
+sp-runtime = { workspace = true }
+sp-staking = { workspace = true }
+xcm = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"pallet-authorship/std",
+	"pallet-session/std",
+	"pallet-staking-rc-client/std",
+	"pallet-staking/std",
+	"polkadot-primitives/std",
+	"polkadot-runtime-parachains/std",
+	"scale-info/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-staking/std",
+	"xcm/std",
+]
+runtime-benchmarks = [
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"pallet-staking-rc-client/runtime-benchmarks",
+	"pallet-staking/runtime-benchmarks",
+	"polkadot-primitives/runtime-benchmarks",
+	"polkadot-runtime-parachains/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+	"sp-staking/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"pallet-authorship/try-runtime",
+	"pallet-session/try-runtime",
+	"pallet-staking-rc-client/try-runtime",
+	"pallet-staking/try-runtime",
+	"polkadot-runtime-parachains/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/staking/ah-client/src/lib.rs b/substrate/frame/staking/ah-client/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..88aee9ee3e9da3f7ce327088d56b29ccd2464643
--- /dev/null
+++ b/substrate/frame/staking/ah-client/src/lib.rs
@@ -0,0 +1,322 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This pallet is intended to be used on a relay chain and to communicate with its counterpart on
+//! AssetHub (or a similar network) named `pallet-staking-rc-client`.
+//!
+//! This pallet serves as an interface between the staking pallet on AssetHub and the session pallet
+//! on the relay chain. From the relay chain to AssetHub, its responsibilities are to send
+//! information about session changes (start and end) and to report offenses. From AssetHub to the
+//! relay chain, it receives information about the potentially new validator set for the session.
+//!
+//! All the communication between the two pallets is performed with XCM messages.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use frame_support::pallet_prelude::*;
+use pallet_staking_rc_client::Offence;
+use sp_core::crypto::AccountId32;
+use sp_runtime::traits::Convert;
+use sp_staking::{offence::OffenceDetails, Exposure, SessionIndex};
+use xcm::prelude::*;
+
+const LOG_TARGET: &str = "runtime::staking::ah-client";
+
+/// `pallet-staking-rc-client` pallet index on AssetHub. Used to construct remote calls.
+///
+/// The codec index must correspond to the index of `pallet-staking-rc-client` in the
+/// `construct_runtime` of AssetHub.
+#[derive(Encode, Decode)]
+enum AssetHubRuntimePallets {
+	#[codec(index = 50)]
+	RcClient(StakingCalls),
+}
+
+/// Call encoding for the calls needed from the rc-client pallet.
+#[derive(Encode, Decode)]
+enum StakingCalls {
+	/// A session with the given index has started.
+	#[codec(index = 0)]
+	RelayChainSessionStart(SessionIndex),
+	// A session with the given index has ended. The block authors with their corresponding
+	// session points are provided.
+	#[codec(index = 1)]
+	RelayChainSessionEnd(SessionIndex, Vec<(AccountId32, u32)>),
+	/// Report one or more offences.
+	#[codec(index = 2)]
+	NewRelayChainOffences(SessionIndex, Vec<Offence>),
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+	use crate::*;
+	use alloc::vec;
+	use core::result;
+	use frame_system::pallet_prelude::*;
+	use pallet_session::historical;
+	use pallet_staking::ExposureOf;
+	use polkadot_primitives::Id as ParaId;
+	use polkadot_runtime_parachains::origin::{ensure_parachain, Origin};
+	use sp_runtime::Perbill;
+	use sp_staking::{offence::OnOffenceHandler, SessionIndex};
+
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+	/// The balance type of this pallet.
+	pub type BalanceOf<T> = <T as Config>::CurrencyBalance;
+
+	// `Exposure<T::AccountId, BalanceOf<T>>` will be removed. This type alias exists only to
+	// suppress clippy warnings.
+	type ElectedValidatorSet<T> = Vec<(
+		<T as frame_system::Config>::AccountId,
+		Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+	)>;
+
+	#[pallet::pallet]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T>(_);
+
+	// TODO: should contain some initial state, otherwise starting from genesis won't work
+	#[pallet::storage]
+	pub type ValidatorSet<T: Config> = StorageValue<_, Option<ElectedValidatorSet<T>>, ValueQuery>;
+
+	/// Keeps track of the session points for each block author in the current session.
+	#[pallet::storage]
+	pub type BlockAuthors<T: Config> = StorageMap<_, Twox64Concat, AccountId32, u32, ValueQuery>;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type RuntimeOrigin: From<<Self as frame_system::Config>::RuntimeOrigin>
+			+ Into<result::Result<Origin, <Self as Config>::RuntimeOrigin>>;
+		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
+		/// `From<u64>`.
+		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
+			+ codec::FullCodec
+			+ Copy
+			+ MaybeSerializeDeserialize
+			+ core::fmt::Debug
+			+ Default
+			+ From<u64>
+			+ TypeInfo
+			+ Send
+			+ Sync
+			+ MaxEncodedLen;
+		/// The ParaId of the AssetHub.
+		#[pallet::constant]
+		type AssetHubId: Get<u32>;
+		/// The XCM sender.
+		type SendXcm: SendXcm;
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The ParaId making the call is not AssetHub.
+		NotAssetHub,
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		#[pallet::call_index(0)]
+		// #[pallet::weight(T::WeightInfo::new_validators())] // TODO
+		pub fn new_validator_set(
+			origin: OriginFor<T>,
+			new_validator_set: ElectedValidatorSet<T>,
+		) -> DispatchResult {
+			// Ignore requests not coming from the AssetHub or root.
+			Self::ensure_root_or_para(origin, <T as Config>::AssetHubId::get().into())?;
+
+			// Save the validator set. We don't care if there is a validator set which was not used.
+			ValidatorSet::<T>::put(Some(new_validator_set));
+
+			Ok(())
+		}
+	}
+
+	impl<T: Config> historical::SessionManager<T::AccountId, Exposure<T::AccountId, BalanceOf<T>>>
+		for Pallet<T>
+	{
+		fn new_session(_: sp_staking::SessionIndex) -> Option<ElectedValidatorSet<T>> {
+			// If there is a new validator set - return it. Otherwise return `None`.
+			ValidatorSet::<T>::take()
+		}
+
+		fn new_session_genesis(
+			_: SessionIndex,
+		) -> Option<Vec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>)>> {
+			ValidatorSet::<T>::take()
+		}
+
+		fn start_session(start_index: SessionIndex) {
+			<Self as pallet_session::SessionManager<_>>::start_session(start_index)
+		}
+
+		fn end_session(end_index: SessionIndex) {
+			<Self as pallet_session::SessionManager<_>>::end_session(end_index)
+		}
+	}
+
+	impl<T: Config> pallet_session::SessionManager<T::AccountId> for Pallet<T> {
+		fn new_session(_: u32) -> Option<Vec<<T as frame_system::Config>::AccountId>> {
+			// Doesn't do anything because all the logic is handled in `historical::SessionManager`
+			// implementation
+			defensive!("new_session should not be called");
+			None
+		}
+
+		fn end_session(session_index: u32) {
+			let authors = BlockAuthors::<T>::iter().collect::<Vec<_>>();
+			// The maximum number of block authors is `num_cores * max_validators_per_core` (both
+			// are parameters from [`SchedulerParams`]).
+			let _ = BlockAuthors::<T>::clear(u32::MAX, None);
+
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::RelayChainSessionEnd(session_index, authors)),
+			]);
+
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `RelayChainSessionEnd` to AssetHub failed: {:?}", err);
+			}
+		}
+
+		fn start_session(session_index: u32) {
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::RelayChainSessionStart(session_index)),
+			]);
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `RelayChainSessionStart` to AssetHub failed: {:?}", err);
+			}
+		}
+	}
+
+	impl<T> pallet_authorship::EventHandler<T::AccountId, BlockNumberFor<T>> for Pallet<T>
+	where
+		T: Config + pallet_authorship::Config + pallet_session::Config + Config,
+		T::AccountId: Into<AccountId32>,
+	{
+		// Notes the authored block in `BlockAuthors`.
+		fn note_author(author: T::AccountId) {
+			BlockAuthors::<T>::mutate(author.into(), |block_count| {
+				*block_count += 1;
+			});
+		}
+	}
+
+	impl<T: Config>
+		OnOffenceHandler<T::AccountId, pallet_session::historical::IdentificationTuple<T>, Weight>
+		for Pallet<T>
+	where
+		T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
+		T: pallet_session::historical::Config<
+			FullIdentification = Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+			FullIdentificationOf = ExposureOf<T>,
+		>,
+		T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
+		T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
+		T::ValidatorIdOf: Convert<
+			<T as frame_system::Config>::AccountId,
+			Option<<T as frame_system::Config>::AccountId>,
+		>,
+		T::AccountId: Into<AccountId32>,
+	{
+		fn on_offence(
+			offenders: &[OffenceDetails<
+				T::AccountId,
+				pallet_session::historical::IdentificationTuple<T>,
+			>],
+			slash_fraction: &[Perbill],
+			slash_session: SessionIndex,
+		) -> Weight {
+			let offenders_and_slashes = offenders
+				.iter()
+				.cloned()
+				.zip(slash_fraction)
+				.map(|(offence, fraction)| {
+					Offence::new(
+						offence.offender.0.into(),
+						offence.reporters.into_iter().map(|r| r.into()).collect(),
+						*fraction,
+					)
+				})
+				.collect::<Vec<_>>();
+
+			// send the offender immediately over xcm
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_asset_hub_call(StakingCalls::NewRelayChainOffences(
+					slash_session,
+					offenders_and_slashes,
+				)),
+			]);
+			if let Err(err) = send_xcm::<T::SendXcm>(
+				Location::new(0, [Junction::Parachain(T::AssetHubId::get())]),
+				message,
+			) {
+				log::error!(target: LOG_TARGET, "Sending `NewRelayChainOffences` to AssetHub failed: {:?}",
+			err);
+			}
+
+			Weight::zero()
+		}
+	}
+
+	impl<T: Config> Pallet<T> {
+		/// Ensure the origin is one of Root or the `para` itself.
+		fn ensure_root_or_para(
+			origin: <T as frame_system::Config>::RuntimeOrigin,
+			id: ParaId,
+		) -> DispatchResult {
+			if let Ok(caller_id) =
+				ensure_parachain(<T as Config>::RuntimeOrigin::from(origin.clone()))
+			{
+				// Check if matching para id...
+				ensure!(caller_id == id, Error::<T>::NotAssetHub);
+			} else {
+				// Check if root...
+				ensure_root(origin.clone())?;
+			}
+			Ok(())
+		}
+	}
+
+	fn mk_asset_hub_call(call: StakingCalls) -> Instruction<()> {
+		Instruction::Transact {
+			origin_kind: OriginKind::Superuser,
+			fallback_max_weight: None,
+			call: AssetHubRuntimePallets::RcClient(call).encode().into(),
+		}
+	}
+}
diff --git a/substrate/frame/staking/rc-client/Cargo.toml b/substrate/frame/staking/rc-client/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5498cae777e12c2eea09eaae2db9828afad21196
--- /dev/null
+++ b/substrate/frame/staking/rc-client/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "pallet-staking-rc-client"
+description = "Pallet handling the communication with staking-ah-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way."
+license = "Apache-2.0"
+version = "0.1.0"
+edition.workspace = true
+authors.workspace = true
+repository.workspace = true
+
+[dependencies]
+codec = { workspace = true, features = ["derive"] }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+log = { workspace = true }
+scale-info = { workspace = true, features = ["derive"] }
+sp-core = { workspace = true }
+sp-runtime = { features = ["serde"], workspace = true }
+sp-staking = { features = ["serde"], workspace = true }
+xcm = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-staking/std",
+	"xcm/std",
+]
+runtime-benchmarks = [
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+	"sp-staking/runtime-benchmarks",
+	"xcm/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/staking/rc-client/src/lib.rs b/substrate/frame/staking/rc-client/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dc6c0b7e5c6fc4d8fc9c9e7196cf9b76890451d9
--- /dev/null
+++ b/substrate/frame/staking/rc-client/src/lib.rs
@@ -0,0 +1,181 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This pallet is intended to be used on AssetHub. It provides extrinsics used by
+//! `pallet-staking-ah-client` and serves as an interface between the relay chain and the staking
+//! pallet on AssetHub.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use frame_support::pallet_prelude::*;
+use sp_core::crypto::AccountId32;
+use sp_runtime::Perbill;
+use sp_staking::SessionIndex;
+use xcm::prelude::*;
+
+const LOG_TARGET: &str = "runtime::staking::rc-client";
+
+// Provides to the pallet a validator set produced by an election or other similar mechanism.
+pub trait ElectionResultHandler<ValidatorId> {
+	fn handle_election_result(result: Vec<ValidatorId>);
+}
+
+// API provided by the staking pallet.
+pub trait StakingApi {
+	/// New session with index `start_index` has started on the relay chain.
+	fn on_relay_chain_session_start(start_index: SessionIndex);
+	/// A session with index `end_index` has ended on the relay chain. The block authors and their
+	/// corresponding session points are reported.
+	fn on_relay_chain_session_end(end_index: SessionIndex, block_authors: Vec<(AccountId32, u32)>);
+	/// Report one or more offences on the relay chain.
+	fn on_new_offences(offences: Vec<Offence>);
+}
+
+/// `pallet-staking-ah-client` pallet index on Relay chain. Used to construct remote calls.
+///
+/// The codec index must correspond to the index of `pallet-staking-ah-client` in the
+/// `construct_runtime` of the Relay chain.
+#[derive(Encode, Decode)]
+enum RelayChainRuntimePallets {
+	#[codec(index = 50)]
+	AhClient(SessionCalls),
+}
+
+/// Call encoding for the calls needed from the pallet.
+#[derive(Encode, Decode)]
+enum SessionCalls {
+	#[codec(index = 0)]
+	NewValidatorSet(Vec<AccountId32>),
+}
+
+// An offence on the relay chain. Based on [`sp_staking::offence::OffenceDetails`].
+#[derive(Encode, Decode, Debug, Clone, PartialEq, TypeInfo)]
+pub struct Offence {
+	offender: AccountId32,
+	reporters: Vec<AccountId32>,
+	slash_fraction: Perbill,
+}
+
+impl Offence {
+	pub fn new(
+		offender: AccountId32,
+		reporters: Vec<AccountId32>,
+		slash_fraction: Perbill,
+	) -> Self {
+		Self { offender, reporters, slash_fraction }
+	}
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+	use super::*;
+	use alloc::vec;
+	use frame_system::pallet_prelude::*;
+
+	/// The in-code storage version.
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+	#[pallet::pallet]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
+		/// A stable ID for a validator.
+		type ValidatorId: Member
+			+ Parameter
+			+ MaybeSerializeDeserialize
+			+ MaxEncodedLen
+			+ TryFrom<Self::AccountId>;
+
+		/// Handler for staking calls
+		type StakingApi: StakingApi;
+		/// The XCM sender.
+		type SendXcm: SendXcm;
+	}
+
+	impl<T: Config, ValidatorId: Into<AccountId32>> ElectionResultHandler<ValidatorId> for Pallet<T> {
+		fn handle_election_result(result: Vec<ValidatorId>) {
+			let new_validator_set = result.into_iter().map(Into::into).collect::<Vec<_>>();
+
+			let message = Xcm(vec![
+				Instruction::UnpaidExecution {
+					weight_limit: WeightLimit::Unlimited,
+					check_origin: None,
+				},
+				mk_relay_chain_call(SessionCalls::NewValidatorSet(new_validator_set)),
+			]);
+
+			if let Err(err) = send_xcm::<T::SendXcm>(Location::new(1, Here), message) {
+				log::error!(target: LOG_TARGET, "Sending `NewValidators` to relay chain failed: {:?}", err);
+			}
+		}
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Called to indicate the start of a new session on the relay chain.
+		#[pallet::call_index(0)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn relay_chain_session_start(
+			origin: OriginFor<T>,
+			start_index: SessionIndex,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_relay_chain_session_start(start_index);
+			Ok(())
+		}
+
+		/// Called to indicate the end of a session on the relay chain. Accepts the session id and
+		/// the block authors with their corresponding session points for the finished session.
+		#[pallet::call_index(1)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn relay_chain_session_end(
+			origin: OriginFor<T>,
+			end_index: SessionIndex,
+			block_authors: Vec<(AccountId32, u32)>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_relay_chain_session_end(end_index, block_authors);
+			Ok(())
+		}
+
+		/// Called to report one or more new offenses on the relay chain.
+		#[pallet::call_index(2)]
+		// #[pallet::weight(T::WeightInfo::end_session())] // TODO
+		pub fn new_relay_chain_offence(
+			origin: OriginFor<T>,
+			offences: Vec<Offence>,
+		) -> DispatchResult {
+			T::AdminOrigin::ensure_origin_or_root(origin)?;
+			T::StakingApi::on_new_offences(offences);
+			Ok(())
+		}
+	}
+
+	fn mk_relay_chain_call(call: SessionCalls) -> Instruction<()> {
+		Instruction::Transact {
+			origin_kind: OriginKind::Superuser,
+			fallback_max_weight: None,
+			call: RelayChainRuntimePallets::AhClient(call).encode().into(),
+		}
+	}
+}
diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs
index 0d084629d660b457c8c3e2a4957763d2fab03f51..1978449bb4ba8d1453d99b753ded280315e22175 100644
--- a/substrate/frame/staking/src/benchmarking.rs
+++ b/substrate/frame/staking/src/benchmarking.rs
@@ -802,21 +802,33 @@ mod benchmarks {
 
 	#[benchmark]
 	fn cancel_deferred_slash(s: Linear<1, MAX_SLASHES>) {
-		let mut unapplied_slashes = Vec::new();
 		let era = EraIndex::one();
-		let dummy = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap();
-		for _ in 0..MAX_SLASHES {
-			unapplied_slashes
-				.push(UnappliedSlash::<T::AccountId, BalanceOf<T>>::default_from(dummy()));
+		let dummy_account = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap();
+
+		// Insert `s` unapplied slashes with the new key structure
+		for i in 0..s {
+			let slash_key = (dummy_account(), Perbill::from_percent(i as u32 % 100), i);
+			let unapplied_slash = UnappliedSlash::<T> {
+				validator: slash_key.0.clone(),
+				own: Zero::zero(),
+				others: WeakBoundedVec::default(),
+				reporter: Default::default(),
+				payout: Zero::zero(),
+			};
+			UnappliedSlashes::<T>::insert(era, slash_key.clone(), unapplied_slash);
 		}
-		UnappliedSlashes::<T>::insert(era, &unapplied_slashes);
 
-		let slash_indices: Vec<u32> = (0..s).collect();
+		let slash_keys: Vec<_> = (0..s)
+			.map(|i| (dummy_account(), Perbill::from_percent(i as u32 % 100), i))
+			.collect();
 
 		#[extrinsic_call]
-		_(RawOrigin::Root, era, slash_indices);
+		_(RawOrigin::Root, era, slash_keys.clone());
 
-		assert_eq!(UnappliedSlashes::<T>::get(&era).len(), (MAX_SLASHES - s) as usize);
+		// Ensure all `s` slashes are removed
+		for key in &slash_keys {
+			assert!(UnappliedSlashes::<T>::get(era, key).is_none());
+		}
 	}
 
 	#[benchmark]
@@ -1137,6 +1149,46 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn apply_slash() -> Result<(), BenchmarkError> {
+		let era = EraIndex::one();
+		ActiveEra::<T>::put(ActiveEraInfo { index: era, start: None });
+		let (validator, nominators) = create_validator_with_nominators::<T>(
+			T::MaxExposurePageSize::get() as u32,
+			T::MaxExposurePageSize::get() as u32,
+			false,
+			true,
+			RewardDestination::Staked,
+			era,
+		)?;
+		let slash_fraction = Perbill::from_percent(10);
+		let page_index = 0;
+		let slashed_balance = BalanceOf::<T>::from(10u32);
+
+		let slash_key = (validator.clone(), slash_fraction, page_index);
+		let slashed_nominators =
+			nominators.iter().map(|(n, _)| (n.clone(), slashed_balance)).collect::<Vec<_>>();
+
+		let unapplied_slash = UnappliedSlash::<T> {
+			validator: validator.clone(),
+			own: slashed_balance,
+			others: WeakBoundedVec::force_from(slashed_nominators, None),
+			reporter: Default::default(),
+			payout: Zero::zero(),
+		};
+
+		// Insert an unapplied slash to be processed.
+		UnappliedSlashes::<T>::insert(era, slash_key.clone(), unapplied_slash);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(validator.clone()), era, slash_key.clone());
+
+		// Ensure the slash has been applied and removed.
+		assert!(UnappliedSlashes::<T>::get(era, &slash_key).is_none());
+
+		Ok(())
+	}
+
 	impl_benchmark_test_suite!(
 		Staking,
 		crate::mock::ExtBuilder::default().has_stakers(true),
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index f97b4ed30b8f7c0a9cac494bbad58def587bf180..9105a3e7ec13a5202efdbf7ddb988995759adef8 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -353,7 +353,7 @@ use frame_support::{
 		ConstU32, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier,
 	},
 	weights::Weight,
-	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
+	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, WeakBoundedVec,
 };
 use scale_info::TypeInfo;
 use sp_runtime::{
@@ -845,31 +845,19 @@ impl<AccountId, Balance: HasCompact + Copy + AtLeast32BitUnsigned + codec::MaxEn
 
 /// A pending slash record. The value of the slash has been computed but not applied yet,
 /// rather deferred for several eras.
-#[derive(Encode, Decode, RuntimeDebug, TypeInfo)]
-pub struct UnappliedSlash<AccountId, Balance: HasCompact> {
+#[derive(Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, PartialEqNoBound)]
+#[scale_info(skip_type_params(T))]
+pub struct UnappliedSlash<T: Config> {
 	/// The stash ID of the offending validator.
-	validator: AccountId,
+	validator: T::AccountId,
 	/// The validator's own slash.
-	own: Balance,
+	own: BalanceOf<T>,
 	/// All other slashed stakers and amounts.
-	others: Vec<(AccountId, Balance)>,
+	others: WeakBoundedVec<(T::AccountId, BalanceOf<T>), T::MaxExposurePageSize>,
 	/// Reporters of the offence; bounty payout recipients.
-	reporters: Vec<AccountId>,
+	reporter: Option<T::AccountId>,
 	/// The amount of payout.
-	payout: Balance,
-}
-
-impl<AccountId, Balance: HasCompact + Zero> UnappliedSlash<AccountId, Balance> {
-	/// Initializes the default object using the given `validator`.
-	pub fn default_from(validator: AccountId) -> Self {
-		Self {
-			validator,
-			own: Zero::zero(),
-			others: vec![],
-			reporters: vec![],
-			payout: Zero::zero(),
-		}
-	}
+	payout: BalanceOf<T>,
 }
 
 /// Something that defines the maximum number of nominations per nominator based on a curve.
@@ -906,12 +894,8 @@ impl<Balance, const MAX: u32> NominationsQuota<Balance> for FixedNominationsQuot
 ///
 /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config`
 pub trait SessionInterface<AccountId> {
-	/// Disable the validator at the given index, returns `false` if the validator was already
-	/// disabled or the index is out of bounds.
-	fn disable_validator(validator_index: u32) -> bool;
-	/// Re-enable a validator that was previously disabled. Returns `false` if the validator was
-	/// already enabled or the index is out of bounds.
-	fn enable_validator(validator_index: u32) -> bool;
+	/// Report an offending validator.
+	fn report_offence(validator: AccountId, severity: OffenceSeverity);
 	/// Get the validators from session.
 	fn validators() -> Vec<AccountId>;
 	/// Prune historical session tries up to but not including the given index.
@@ -921,10 +905,7 @@ pub trait SessionInterface<AccountId> {
 impl<T: Config> SessionInterface<<T as frame_system::Config>::AccountId> for T
 where
 	T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
-	T: pallet_session::historical::Config<
-		FullIdentification = Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
-		FullIdentificationOf = ExposureOf<T>,
-	>,
+	T: pallet_session::historical::Config,
 	T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
 	T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
 	T::ValidatorIdOf: Convert<
@@ -932,12 +913,11 @@ where
 		Option<<T as frame_system::Config>::AccountId>,
 	>,
 {
-	fn disable_validator(validator_index: u32) -> bool {
-		<pallet_session::Pallet<T>>::disable_index(validator_index)
-	}
-
-	fn enable_validator(validator_index: u32) -> bool {
-		<pallet_session::Pallet<T>>::enable_index(validator_index)
+	fn report_offence(
+		validator: <T as frame_system::Config>::AccountId,
+		severity: OffenceSeverity,
+	) {
+		<pallet_session::Pallet<T>>::report_offence(validator, severity)
 	}
 
 	fn validators() -> Vec<<T as frame_system::Config>::AccountId> {
@@ -950,11 +930,8 @@ where
 }
 
 impl<AccountId> SessionInterface<AccountId> for () {
-	fn disable_validator(_: u32) -> bool {
-		true
-	}
-	fn enable_validator(_: u32) -> bool {
-		true
+	fn report_offence(_validator: AccountId, _severity: OffenceSeverity) {
+		()
 	}
 	fn validators() -> Vec<AccountId> {
 		Vec::new()
@@ -1071,6 +1048,13 @@ impl<T: Config> Convert<T::AccountId, Option<Exposure<T::AccountId, BalanceOf<T>
 	}
 }
 
+pub struct NullIdentity;
+impl<T> Convert<T, Option<()>> for NullIdentity {
+	fn convert(_: T) -> Option<()> {
+		Some(())
+	}
+}
+
 /// Filter historical offences out and only allow those from the bonding period.
 pub struct FilterHistoricalOffences<T, R> {
 	_inner: core::marker::PhantomData<(T, R)>,
@@ -1337,200 +1321,3 @@ impl BenchmarkingConfig for TestBenchmarkingConfig {
 	type MaxValidators = frame_support::traits::ConstU32<100>;
 	type MaxNominators = frame_support::traits::ConstU32<100>;
 }
-
-/// Controls validator disabling
-pub trait DisablingStrategy<T: Config> {
-	/// Make a disabling decision. Returning a [`DisablingDecision`]
-	fn decision(
-		offender_stash: &T::AccountId,
-		offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision;
-}
-
-/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing
-/// `decision`
-///
-/// `disable` is the index of the validator to disable,
-/// `reenable` is the index of the validator to re-enable.
-#[derive(Debug)]
-pub struct DisablingDecision {
-	pub disable: Option<u32>,
-	pub reenable: Option<u32>,
-}
-
-/// Calculate the disabling limit based on the number of validators and the disabling limit factor.
-///
-/// This is a sensible default implementation for the disabling limit factor for most disabling
-/// strategies.
-///
-/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled
-fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
-	validators_len
-		.saturating_sub(1)
-		.checked_div(disabling_limit_factor)
-		.unwrap_or_else(|| {
-			defensive!("DISABLING_LIMIT_FACTOR should not be 0");
-			0
-		})
-}
-
-/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables
-/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the
-/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than
-/// 1/3 of the validators in the active set can be disabled in an era.
-///
-/// By default a factor of 3 is used which is the byzantine threshold.
-pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
-
-impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
-	/// Disabling limit calculated from the total number of validators in the active set. When
-	/// reached no more validators will be disabled.
-	pub fn disable_limit(validators_len: usize) -> usize {
-		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
-	}
-}
-
-impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
-	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	fn decision(
-		offender_stash: &T::AccountId,
-		_offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision {
-		let active_set = T::SessionInterface::validators();
-
-		// We don't disable more than the limit
-		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
-			log!(
-				debug,
-				"Won't disable: reached disabling limit {:?}",
-				Self::disable_limit(active_set.len())
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		// We don't disable for offences in previous eras
-		if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
-			log!(
-				debug,
-				"Won't disable: current_era {:?} > slash_era {:?}",
-				CurrentEra::<T>::get().unwrap_or_default(),
-				slash_era
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
-			idx as u32
-		} else {
-			log!(debug, "Won't disable: offender not in active set",);
-			return DisablingDecision { disable: None, reenable: None }
-		};
-
-		log!(debug, "Will disable {:?}", offender_idx);
-
-		DisablingDecision { disable: Some(offender_idx), reenable: None }
-	}
-}
-
-/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
-/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher
-/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new
-/// offender.
-///
-/// This strategy is not based on cumulative severity of offences but only on the severity of the
-/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated
-/// the same as an offender committing 50% offence.
-///
-/// An extension of [`UpToLimitDisablingStrategy`].
-pub struct UpToLimitWithReEnablingDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
-
-impl<const DISABLING_LIMIT_FACTOR: usize>
-	UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	/// Disabling limit calculated from the total number of validators in the active set. When
-	/// reached re-enabling logic might kick in.
-	pub fn disable_limit(validators_len: usize) -> usize {
-		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
-	}
-}
-
-impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
-	for UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
-{
-	fn decision(
-		offender_stash: &T::AccountId,
-		offender_slash_severity: OffenceSeverity,
-		slash_era: EraIndex,
-		currently_disabled: &Vec<(u32, OffenceSeverity)>,
-	) -> DisablingDecision {
-		let active_set = T::SessionInterface::validators();
-
-		// We don't disable for offences in previous eras
-		if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
-			log!(
-				debug,
-				"Won't disable: current_era {:?} > slash_era {:?}",
-				Pallet::<T>::current_era().unwrap_or_default(),
-				slash_era
-			);
-			return DisablingDecision { disable: None, reenable: None }
-		}
-
-		// We don't disable validators that are not in the active set
-		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
-			idx as u32
-		} else {
-			log!(debug, "Won't disable: offender not in active set",);
-			return DisablingDecision { disable: None, reenable: None }
-		};
-
-		// Check if offender is already disabled
-		if let Some((_, old_severity)) =
-			currently_disabled.iter().find(|(idx, _)| *idx == offender_idx)
-		{
-			if offender_slash_severity > *old_severity {
-				log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx);
-				return DisablingDecision { disable: Some(offender_idx), reenable: None };
-			} else {
-				log!(debug, "Offender already disabled with higher or equal severity");
-				return DisablingDecision { disable: None, reenable: None };
-			}
-		}
-
-		// We don't disable more than the limit (but we can re-enable a smaller offender to make
-		// space)
-		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
-			log!(
-				debug,
-				"Reached disabling limit {:?}, checking for re-enabling",
-				Self::disable_limit(active_set.len())
-			);
-
-			// Find the smallest offender to re-enable that is not higher than
-			// offender_slash_severity
-			if let Some((smallest_idx, _)) = currently_disabled
-				.iter()
-				.filter(|(_, severity)| *severity <= offender_slash_severity)
-				.min_by_key(|(_, severity)| *severity)
-			{
-				log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx);
-				return DisablingDecision {
-					disable: Some(offender_idx),
-					reenable: Some(*smallest_idx),
-				}
-			} else {
-				log!(debug, "No smaller offender found to re-enable");
-				return DisablingDecision { disable: None, reenable: None }
-			}
-		} else {
-			// If we are not at the limit, just disable the new offender and dont re-enable anyone
-			log!(debug, "Will disable {:?}", offender_idx);
-			return DisablingDecision { disable: Some(offender_idx), reenable: None }
-		}
-	}
-}
diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs
index 08667dd61767bf272875fa10b057b4a393a6618c..5b0118da67ef72c1f16c34d2d700af85591b5d08 100644
--- a/substrate/frame/staking/src/migrations.rs
+++ b/substrate/frame/staking/src/migrations.rs
@@ -18,12 +18,12 @@
 //! [CHANGELOG.md](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame/staking/CHANGELOG.md).
 
 use super::*;
-use frame_election_provider_support::SortedListProvider;
 use frame_support::{
 	migrations::VersionedMigration,
 	pallet_prelude::ValueQuery,
 	storage_alias,
 	traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade},
+	Twox64Concat,
 };
 
 #[cfg(feature = "try-runtime")]
@@ -36,10 +36,6 @@ use sp_runtime::TryRuntimeError;
 /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier.
 #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
 enum ObsoleteReleases {
-	V1_0_0Ancient,
-	V2_0_0,
-	V3_0_0,
-	V4_0_0,
 	V5_0_0,  // blockable validators.
 	V6_0_0,  // removal of all storage associated with offchain phragmen.
 	V7_0_0,  // keep track of number of nominators / validators in map
@@ -60,6 +56,107 @@ impl Default for ObsoleteReleases {
 #[storage_alias]
 type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
 
+/// Migrates `UnappliedSlashes` to a new storage structure to support paged slashing.
+/// This ensures that slashing can be processed in batches, preventing large storage operations in a
+/// single block.
+pub mod v17 {
+	use super::*;
+
+	#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)]
+	struct OldUnappliedSlash<T: Config> {
+		validator: T::AccountId,
+		/// The validator's own slash.
+		own: BalanceOf<T>,
+		/// All other slashed stakers and amounts.
+		others: Vec<(T::AccountId, BalanceOf<T>)>,
+		/// Reporters of the offence; bounty payout recipients.
+		reporters: Vec<T::AccountId>,
+		/// The amount of payout.
+		payout: BalanceOf<T>,
+	}
+
+	#[frame_support::storage_alias]
+	pub type OldUnappliedSlashes<T: Config> =
+		StorageMap<Pallet<T>, Twox64Concat, EraIndex, Vec<OldUnappliedSlash<T>>, ValueQuery>;
+
+	#[frame_support::storage_alias]
+	pub type DisabledValidators<T: Config> =
+		StorageValue<Pallet<T>, BoundedVec<(u32, OffenceSeverity), ConstU32<100>>, ValueQuery>;
+
+	pub struct VersionUncheckedMigrateV16ToV17<T>(core::marker::PhantomData<T>);
+	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV16ToV17<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+
+			OldUnappliedSlashes::<T>::drain().for_each(|(era, slashes)| {
+				weight.saturating_accrue(T::DbWeight::get().reads(1));
+
+				for slash in slashes {
+					let validator = slash.validator.clone();
+					let new_slash = UnappliedSlash {
+						validator: validator.clone(),
+						own: slash.own,
+						others: WeakBoundedVec::force_from(slash.others, None),
+						payout: slash.payout,
+						reporter: slash.reporters.first().cloned(),
+					};
+
+					// creating a slash key which is improbable to conflict with a new offence.
+					let slash_key = (validator, Perbill::from_percent(99), 9999);
+					UnappliedSlashes::<T>::insert(era, slash_key, new_slash);
+					weight.saturating_accrue(T::DbWeight::get().writes(1));
+				}
+			});
+
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			let mut expected_slashes: u32 = 0;
+			OldUnappliedSlashes::<T>::iter().for_each(|(_, slashes)| {
+				expected_slashes += slashes.len() as u32;
+			});
+
+			Ok(expected_slashes.encode())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
+			let expected_slash_count =
+				u32::decode(&mut state.as_slice()).expect("Failed to decode state");
+
+			let actual_slash_count = UnappliedSlashes::<T>::iter().count() as u32;
+
+			ensure!(expected_slash_count == actual_slash_count, "Slash count mismatch");
+
+			Ok(())
+		}
+	}
+
+	pub type MigrateV16ToV17<T> = VersionedMigration<
+		16,
+		17,
+		VersionUncheckedMigrateV16ToV17<T>,
+		Pallet<T>,
+		<T as frame_system::Config>::DbWeight,
+	>;
+
+	pub struct MigrateDisabledToSession<T>(core::marker::PhantomData<T>);
+	impl<T: Config> pallet_session::migrations::v1::MigrateDisabledValidators
+		for MigrateDisabledToSession<T>
+	{
+		#[cfg(feature = "try-runtime")]
+		fn peek_disabled() -> Vec<(u32, OffenceSeverity)> {
+			DisabledValidators::<T>::get().into()
+		}
+
+		fn take_disabled() -> Vec<(u32, OffenceSeverity)> {
+			DisabledValidators::<T>::take().into()
+		}
+	}
+}
+
 /// Migrating `DisabledValidators` from `Vec<u32>` to `Vec<(u32, OffenceSeverity)>` to track offense
 /// severity for re-enabling purposes.
 pub mod v16 {
@@ -128,7 +225,7 @@ pub mod v16 {
 			// Decode state to get old_disabled_validators in a format of Vec<u32>
 			let old_disabled_validators =
 				Vec::<u32>::decode(&mut state.as_slice()).expect("Failed to decode state");
-			let new_disabled_validators = DisabledValidators::<T>::get();
+			let new_disabled_validators = v17::DisabledValidators::<T>::get();
 
 			// Compare lengths
 			frame_support::ensure!(
@@ -146,7 +243,7 @@ pub mod v16 {
 
 			// Verify severity
 			let max_severity = OffenceSeverity(Perbill::from_percent(100));
-			let new_disabled_validators = DisabledValidators::<T>::get();
+			let new_disabled_validators = v17::DisabledValidators::<T>::get();
 			for (_, severity) in new_disabled_validators {
 				frame_support::ensure!(severity == max_severity, "Severity mismatch");
 			}
@@ -169,7 +266,7 @@ pub mod v15 {
 	use super::*;
 
 	// The disabling strategy used by staking pallet
-	type DefaultDisablingStrategy = UpToLimitDisablingStrategy;
+	type DefaultDisablingStrategy = pallet_session::disabling::UpToLimitDisablingStrategy;
 
 	#[storage_alias]
 	pub(crate) type DisabledValidators<T: Config> = StorageValue<Pallet<T>, Vec<u32>, ValueQuery>;
@@ -446,257 +543,3 @@ pub mod v11 {
 		}
 	}
 }
-
-pub mod v10 {
-	use super::*;
-	use frame_support::storage_alias;
-
-	#[storage_alias]
-	type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
-
-	/// Apply any pending slashes that where queued.
-	///
-	/// That means we might slash someone a bit too early, but we will definitely
-	/// won't forget to slash them. The cap of 512 is somewhat randomly taken to
-	/// prevent us from iterating over an arbitrary large number of keys `on_runtime_upgrade`.
-	pub struct MigrateToV10<T>(core::marker::PhantomData<T>);
-	impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
-		fn on_runtime_upgrade() -> frame_support::weights::Weight {
-			if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
-				let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
-				for (era, slashes) in pending_slashes {
-					for slash in slashes {
-						// in the old slashing scheme, the slash era was the key at which we read
-						// from `UnappliedSlashes`.
-						log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
-						slashing::apply_slash::<T>(slash, era);
-					}
-				}
-
-				EarliestUnappliedSlash::<T>::kill();
-				StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
-
-				log!(info, "MigrateToV10 executed successfully");
-				T::DbWeight::get().reads_writes(1, 2)
-			} else {
-				log!(warn, "MigrateToV10 should be removed.");
-				T::DbWeight::get().reads(1)
-			}
-		}
-	}
-}
-
-pub mod v9 {
-	use super::*;
-	#[cfg(feature = "try-runtime")]
-	use alloc::vec::Vec;
-	#[cfg(feature = "try-runtime")]
-	use codec::{Decode, Encode};
-
-	/// Migration implementation that injects all validators into sorted list.
-	///
-	/// This is only useful for chains that started their `VoterList` just based on nominators.
-	pub struct InjectValidatorsIntoVoterList<T>(core::marker::PhantomData<T>);
-	impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
-		fn on_runtime_upgrade() -> Weight {
-			if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 {
-				let prev_count = T::VoterList::count();
-				let weight_of_cached = Pallet::<T>::weight_of_fn();
-				for (v, _) in Validators::<T>::iter() {
-					let weight = weight_of_cached(&v);
-					let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
-						log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
-					});
-				}
-
-				log!(
-					info,
-					"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
-					Validators::<T>::count(),
-					prev_count,
-					T::VoterList::count(),
-				);
-
-				StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
-				T::BlockWeights::get().max_block
-			} else {
-				log!(
-					warn,
-					"InjectValidatorsIntoVoterList being executed on the wrong storage \
-				version, expected ObsoleteReleases::V8_0_0"
-				);
-				T::DbWeight::get().reads(1)
-			}
-		}
-
-		#[cfg(feature = "try-runtime")]
-		fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
-			frame_support::ensure!(
-				StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0,
-				"must upgrade linearly"
-			);
-
-			let prev_count = T::VoterList::count();
-			Ok(prev_count.encode())
-		}
-
-		#[cfg(feature = "try-runtime")]
-		fn post_upgrade(prev_count: Vec<u8>) -> Result<(), TryRuntimeError> {
-			let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect(
-				"the state parameter should be something that was generated by pre_upgrade",
-			);
-			let post_count = T::VoterList::count();
-			let validators = Validators::<T>::count();
-			ensure!(
-				post_count == prev_count + validators,
-				"`VoterList` count after the migration must equal to the sum of \
-				previous count and the current number of validators"
-			);
-
-			frame_support::ensure!(
-				StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0,
-				"must upgrade"
-			);
-			Ok(())
-		}
-	}
-}
-
-pub mod v8 {
-	use super::*;
-	use crate::{Config, Nominators, Pallet, Weight};
-	use frame_election_provider_support::SortedListProvider;
-	use frame_support::traits::Get;
-
-	#[cfg(feature = "try-runtime")]
-	pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
-		frame_support::ensure!(
-			StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0,
-			"must upgrade linearly"
-		);
-
-		crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",);
-		Ok(())
-	}
-
-	/// Migration to sorted `VoterList`.
-	pub fn migrate<T: Config>() -> Weight {
-		if StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0 {
-			crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0");
-
-			let migrated = T::VoterList::unsafe_regenerate(
-				Nominators::<T>::iter().map(|(id, _)| id),
-				Pallet::<T>::weight_of_fn(),
-			);
-
-			StorageVersion::<T>::put(ObsoleteReleases::V8_0_0);
-			crate::log!(
-				info,
-				"👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated",
-				migrated,
-			);
-
-			T::BlockWeights::get().max_block
-		} else {
-			T::DbWeight::get().reads(1)
-		}
-	}
-
-	#[cfg(feature = "try-runtime")]
-	pub fn post_migrate<T: Config>() -> Result<(), &'static str> {
-		T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?;
-		crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",);
-		Ok(())
-	}
-}
-
-pub mod v7 {
-	use super::*;
-	use frame_support::storage_alias;
-
-	#[storage_alias]
-	type CounterForValidators<T: Config> = StorageValue<Pallet<T>, u32>;
-	#[storage_alias]
-	type CounterForNominators<T: Config> = StorageValue<Pallet<T>, u32>;
-
-	pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
-		assert!(
-			CounterForValidators::<T>::get().unwrap().is_zero(),
-			"CounterForValidators already set."
-		);
-		assert!(
-			CounterForNominators::<T>::get().unwrap().is_zero(),
-			"CounterForNominators already set."
-		);
-		assert!(Validators::<T>::count().is_zero(), "Validators already set.");
-		assert!(Nominators::<T>::count().is_zero(), "Nominators already set.");
-		assert!(StorageVersion::<T>::get() == ObsoleteReleases::V6_0_0);
-		Ok(())
-	}
-
-	pub fn migrate<T: Config>() -> Weight {
-		log!(info, "Migrating staking to ObsoleteReleases::V7_0_0");
-		let validator_count = Validators::<T>::iter().count() as u32;
-		let nominator_count = Nominators::<T>::iter().count() as u32;
-
-		CounterForValidators::<T>::put(validator_count);
-		CounterForNominators::<T>::put(nominator_count);
-
-		StorageVersion::<T>::put(ObsoleteReleases::V7_0_0);
-		log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0");
-
-		T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2)
-	}
-}
-
-pub mod v6 {
-	use super::*;
-	use frame_support::{storage_alias, traits::Get, weights::Weight};
-
-	// NOTE: value type doesn't matter, we just set it to () here.
-	#[storage_alias]
-	type SnapshotValidators<T: Config> = StorageValue<Pallet<T>, ()>;
-	#[storage_alias]
-	type SnapshotNominators<T: Config> = StorageValue<Pallet<T>, ()>;
-	#[storage_alias]
-	type QueuedElected<T: Config> = StorageValue<Pallet<T>, ()>;
-	#[storage_alias]
-	type QueuedScore<T: Config> = StorageValue<Pallet<T>, ()>;
-	#[storage_alias]
-	type EraElectionStatus<T: Config> = StorageValue<Pallet<T>, ()>;
-	#[storage_alias]
-	type IsCurrentSessionFinal<T: Config> = StorageValue<Pallet<T>, ()>;
-
-	/// check to execute prior to migration.
-	pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
-		// these may or may not exist.
-		log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::<T>::exists());
-		log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::<T>::exists());
-		log!(info, "QueuedElected.exits()? {:?}", QueuedElected::<T>::exists());
-		log!(info, "QueuedScore.exits()? {:?}", QueuedScore::<T>::exists());
-		// these must exist.
-		assert!(
-			IsCurrentSessionFinal::<T>::exists(),
-			"IsCurrentSessionFinal storage item not found!"
-		);
-		assert!(EraElectionStatus::<T>::exists(), "EraElectionStatus storage item not found!");
-		Ok(())
-	}
-
-	/// Migrate storage to v6.
-	pub fn migrate<T: Config>() -> Weight {
-		log!(info, "Migrating staking to ObsoleteReleases::V6_0_0");
-
-		SnapshotValidators::<T>::kill();
-		SnapshotNominators::<T>::kill();
-		QueuedElected::<T>::kill();
-		QueuedScore::<T>::kill();
-		EraElectionStatus::<T>::kill();
-		IsCurrentSessionFinal::<T>::kill();
-
-		StorageVersion::<T>::put(ObsoleteReleases::V6_0_0);
-
-		log!(info, "Done.");
-		T::DbWeight::get().writes(6 + 1)
-	}
-}
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index fdf14976a7d02e03f40025247ceac10b2edd17d4..43cff11d80427c563b1ce51c16c199595c00626c 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -148,12 +148,14 @@ impl pallet_session::Config for Test {
 	type ValidatorId = AccountId;
 	type ValidatorIdOf = crate::StashOf<Test>;
 	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type DisablingStrategy =
+		pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type WeightInfo = ();
 }
 
 impl pallet_session::historical::Config for Test {
-	type FullIdentification = crate::Exposure<AccountId, Balance>;
-	type FullIdentificationOf = crate::ExposureOf<Test>;
+	type FullIdentification = ();
+	type FullIdentificationOf = NullIdentity;
 }
 impl pallet_authorship::Config for Test {
 	type FindAuthor = Author11;
@@ -363,8 +365,6 @@ impl crate::pallet::pallet::Config for Test {
 	type HistoryDepth = HistoryDepth;
 	type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch;
 	type EventListeners = EventListenerMock;
-	type DisablingStrategy =
-		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
 	type MaxInvulnerables = ConstU32<20>;
 	type MaxDisabledValidators = ConstU32<100>;
 }
@@ -719,6 +719,11 @@ pub(crate) fn run_to_block(n: BlockNumber) {
 	);
 }
 
+/// Progress by n block.
+pub(crate) fn advance_blocks(n: u64) {
+	run_to_block(System::block_number() + n);
+}
+
 /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`.
 pub(crate) fn start_session(end_session_idx: SessionIndex) {
 	let period = Period::get();
@@ -821,11 +826,21 @@ pub(crate) fn on_offence_in_era(
 	>],
 	slash_fraction: &[Perbill],
 	era: EraIndex,
+	advance_processing_blocks: bool,
 ) {
+	// counter to keep track of how many blocks we need to advance to process all the offences.
+	let mut process_blocks = 0u32;
+	for detail in offenders {
+		process_blocks += EraInfo::<Test>::get_page_count(era, &detail.offender.0);
+	}
+
 	let bonded_eras = crate::BondedEras::<Test>::get();
 	for &(bonded_era, start_session) in bonded_eras.iter() {
 		if bonded_era == era {
 			let _ = Staking::on_offence(offenders, slash_fraction, start_session);
+			if advance_processing_blocks {
+				advance_blocks(process_blocks as u64);
+			}
 			return
 		} else if bonded_era > era {
 			break
@@ -838,6 +853,9 @@ pub(crate) fn on_offence_in_era(
 			slash_fraction,
 			pallet_staking::ErasStartSessionIndex::<Test>::get(era).unwrap(),
 		);
+		if advance_processing_blocks {
+			advance_blocks(process_blocks as u64);
+		}
 	} else {
 		panic!("cannot slash in era {}", era);
 	}
@@ -849,19 +867,23 @@ pub(crate) fn on_offence_now(
 		pallet_session::historical::IdentificationTuple<Test>,
 	>],
 	slash_fraction: &[Perbill],
+	advance_processing_blocks: bool,
 ) {
 	let now = pallet_staking::ActiveEra::<Test>::get().unwrap().index;
-	on_offence_in_era(offenders, slash_fraction, now)
+	on_offence_in_era(offenders, slash_fraction, now, advance_processing_blocks);
+}
+pub(crate) fn offence_from(
+	offender: AccountId,
+	reporter: Option<AccountId>,
+) -> OffenceDetails<AccountId, pallet_session::historical::IdentificationTuple<Test>> {
+	OffenceDetails {
+		offender: (offender, ()),
+		reporters: reporter.map(|r| vec![(r)]).unwrap_or_default(),
+	}
 }
 
 pub(crate) fn add_slash(who: &AccountId) {
-	on_offence_now(
-		&[OffenceDetails {
-			offender: (*who, Staking::eras_stakers(active_era(), who)),
-			reporters: vec![],
-		}],
-		&[Perbill::from_percent(10)],
-	);
+	on_offence_now(&[offence_from(*who, None)], &[Perbill::from_percent(10)], true);
 }
 
 /// Make all validator and nominator request their payment
@@ -1006,6 +1028,14 @@ pub(crate) fn staking_events() -> Vec<crate::Event<Test>> {
 		.collect()
 }
 
+pub(crate) fn session_events() -> Vec<pallet_session::Event<Test>> {
+	System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::Session(inner) = e { Some(inner) } else { None })
+		.collect()
+}
+
 parameter_types! {
 	static StakingEventsIndex: usize = 0;
 }
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 8ca018c7d8b41611a3350927c3980b3363d4dbef..0406c1dbbb2d84ab569747010ac43cdb5dab6ae7 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -34,14 +34,12 @@ use frame_support::{
 use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
 use pallet_session::historical;
 use sp_runtime::{
-	traits::{
-		Bounded, CheckedAdd, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero,
-	},
+	traits::{Bounded, CheckedAdd, Convert, SaturatedConversion, Saturating, StaticLookup, Zero},
 	ArithmeticError, DispatchResult, Perbill, Percent,
 };
 use sp_staking::{
 	currency_to_vote::CurrencyToVote,
-	offence::{OffenceDetails, OnOffenceHandler},
+	offence::{OffenceDetails, OffenceSeverity, OnOffenceHandler},
 	EraIndex, OnStakingUpdate, Page, SessionIndex, Stake,
 	StakingAccount::{self, Controller, Stash},
 	StakingInterface,
@@ -49,15 +47,16 @@ use sp_staking::{
 
 use crate::{
 	asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo,
-	BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing,
-	IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf,
-	Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface,
-	SnapshotStatus, StakingLedger, ValidatorPrefs, STAKING_ID,
+	BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, Forcing, IndividualExposure,
+	LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf, Nominations,
+	NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, SnapshotStatus,
+	StakingLedger, ValidatorPrefs, STAKING_ID,
 };
 use alloc::{boxed::Box, vec, vec::Vec};
 
 use super::pallet::*;
 
+use crate::slashing::OffenceRecord;
 #[cfg(feature = "try-runtime")]
 use frame_support::ensure;
 #[cfg(any(test, feature = "try-runtime"))]
@@ -518,11 +517,12 @@ impl<T: Config> Pallet<T> {
 				frame_support::print("Warning: A session appears to have been skipped.");
 				Self::start_era(start_session);
 			}
-		}
 
-		// disable all offending validators that have been disabled for the whole era
-		for (index, _) in <DisabledValidators<T>>::get() {
-			T::SessionInterface::disable_validator(index);
+			// trigger election in the last session of the era
+			if start_session + 1 == next_active_era_start_session_index {
+				// TODO: trigger election
+				// Self::trigger_election();
+			}
 		}
 	}
 
@@ -577,8 +577,6 @@ impl<T: Config> Pallet<T> {
 				}
 			}
 		});
-
-		Self::apply_unapplied_slashes(active_era);
 	}
 
 	/// Compute payout for era.
@@ -612,9 +610,6 @@ impl<T: Config> Pallet<T> {
 			// Set ending era reward.
 			<ErasValidatorReward<T>>::insert(&active_era.index, validator_payout);
 			T::RewardRemainder::on_unbalanced(asset::issue::<T>(remainder));
-
-			// Clear disabled validators.
-			<DisabledValidators<T>>::kill();
 		}
 	}
 
@@ -983,17 +978,19 @@ impl<T: Config> Pallet<T> {
 	}
 
 	/// Apply previously-unapplied slashes on the beginning of a new era, after a delay.
-	fn apply_unapplied_slashes(active_era: EraIndex) {
-		let era_slashes = UnappliedSlashes::<T>::take(&active_era);
-		log!(
-			debug,
-			"found {} slashes scheduled to be executed in era {:?}",
-			era_slashes.len(),
-			active_era,
-		);
-		for slash in era_slashes {
-			let slash_era = active_era.saturating_sub(T::SlashDeferDuration::get());
-			slashing::apply_slash::<T>(slash, slash_era);
+	pub(crate) fn apply_unapplied_slashes(active_era: EraIndex) {
+		let mut slashes = UnappliedSlashes::<T>::iter_prefix(&active_era).take(1);
+		if let Some((key, slash)) = slashes.next() {
+			log!(
+				debug,
+				"🦹 found slash {:?} scheduled to be executed in era {:?}",
+				slash,
+				active_era,
+			);
+			let offence_era = active_era.saturating_sub(T::SlashDeferDuration::get());
+			slashing::apply_slash::<T>(slash, offence_era);
+			// remove the slash
+			UnappliedSlashes::<T>::remove(&active_era, &key);
 		}
 	}
 
@@ -1762,6 +1759,23 @@ impl<T: Config> historical::SessionManager<T::AccountId, Exposure<T::AccountId,
 	}
 }
 
+impl<T: Config> historical::SessionManager<T::AccountId, ()> for Pallet<T> {
+	fn new_session(new_index: SessionIndex) -> Option<Vec<(T::AccountId, ())>> {
+		<Self as pallet_session::SessionManager<_>>::new_session(new_index)
+			.map(|validators| validators.into_iter().map(|v| (v, ())).collect())
+	}
+	fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<(T::AccountId, ())>> {
+		<Self as pallet_session::SessionManager<_>>::new_session_genesis(new_index)
+			.map(|validators| validators.into_iter().map(|v| (v, ())).collect())
+	}
+	fn start_session(start_index: SessionIndex) {
+		<Self as pallet_session::SessionManager<_>>::start_session(start_index)
+	}
+	fn end_session(end_index: SessionIndex) {
+		<Self as pallet_session::SessionManager<_>>::end_session(end_index)
+	}
+}
+
 /// Add reward points to block authors:
 /// * 20 points to the block producer for producing a (non-uncle) block,
 impl<T> pallet_authorship::EventHandler<T::AccountId, BlockNumberFor<T>> for Pallet<T>
@@ -1779,10 +1793,7 @@ impl<T: Config>
 	for Pallet<T>
 where
 	T: pallet_session::Config<ValidatorId = <T as frame_system::Config>::AccountId>,
-	T: pallet_session::historical::Config<
-		FullIdentification = Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
-		FullIdentificationOf = ExposureOf<T>,
-	>,
+	T: pallet_session::historical::Config,
 	T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Config>::AccountId>,
 	T::SessionManager: pallet_session::SessionManager<<T as frame_system::Config>::AccountId>,
 	T::ValidatorIdOf: Convert<
@@ -1790,124 +1801,195 @@ where
 		Option<<T as frame_system::Config>::AccountId>,
 	>,
 {
+	/// When an offence is reported, it is split into pages and put in the offence queue.
+	/// As offence queue is processed, computed slashes are queued to be applied after the
+	/// `SlashDeferDuration`.
 	fn on_offence(
-		offenders: &[OffenceDetails<
-			T::AccountId,
-			pallet_session::historical::IdentificationTuple<T>,
-		>],
-		slash_fraction: &[Perbill],
+		offenders: &[OffenceDetails<T::AccountId, historical::IdentificationTuple<T>>],
+		slash_fractions: &[Perbill],
 		slash_session: SessionIndex,
 	) -> Weight {
-		let reward_proportion = SlashRewardFraction::<T>::get();
-		let mut consumed_weight = Weight::from_parts(0, 0);
+		log!(
+			debug,
+			"🦹 on_offence: offenders={:?}, slash_fractions={:?}, slash_session={}",
+			offenders,
+			slash_fractions,
+			slash_session,
+		);
+
+		// todo(ank4n): Needs to be properly benched.
+		let mut consumed_weight = Weight::zero();
 		let mut add_db_reads_writes = |reads, writes| {
 			consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
 		};
 
-		let active_era = {
-			let active_era = ActiveEra::<T>::get();
-			add_db_reads_writes(1, 0);
-			if active_era.is_none() {
-				// This offence need not be re-submitted.
-				return consumed_weight
-			}
-			active_era.expect("value checked not to be `None`; qed").index
-		};
-		let active_era_start_session_index = ErasStartSessionIndex::<T>::get(active_era)
-			.unwrap_or_else(|| {
-				frame_support::print("Error: start_session_index must be set for current_era");
-				0
-			});
+		// Find the era to which offence belongs.
 		add_db_reads_writes(1, 0);
+		let Some(active_era) = ActiveEra::<T>::get() else {
+			log!(warn, "🦹 on_offence: no active era; ignoring offence");
+			return consumed_weight
+		};
 
-		let window_start = active_era.saturating_sub(T::BondingDuration::get());
+		add_db_reads_writes(1, 0);
+		let active_era_start_session =
+			ErasStartSessionIndex::<T>::get(active_era.index).unwrap_or(0);
 
 		// Fast path for active-era report - most likely.
 		// `slash_session` cannot be in a future active era. It must be in `active_era` or before.
-		let slash_era = if slash_session >= active_era_start_session_index {
-			active_era
+		let offence_era = if slash_session >= active_era_start_session {
+			active_era.index
 		} else {
-			let eras = BondedEras::<T>::get();
 			add_db_reads_writes(1, 0);
-
-			// Reverse because it's more likely to find reports from recent eras.
-			match eras.iter().rev().find(|&(_, sesh)| sesh <= &slash_session) {
-				Some((slash_era, _)) => *slash_era,
-				// Before bonding period. defensive - should be filtered out.
-				None => return consumed_weight,
+			match BondedEras::<T>::get()
+				.iter()
+				// Reverse because it's more likely to find reports from recent eras.
+				.rev()
+				.find(|&(_, sesh)| sesh <= &slash_session)
+				.map(|(era, _)| *era)
+			{
+				Some(era) => era,
+				None => {
+					// defensive: this implies offence is for a discarded era, and should already be
+					// filtered out.
+					log!(warn, "🦹 on_offence: no era found for slash_session; ignoring offence");
+					return Weight::default()
+				},
 			}
 		};
 
-		add_db_reads_writes(1, 1);
-
-		let slash_defer_duration = T::SlashDeferDuration::get();
-
-		let invulnerables = Invulnerables::<T>::get();
 		add_db_reads_writes(1, 0);
+		let invulnerables = Invulnerables::<T>::get();
 
-		for (details, slash_fraction) in offenders.iter().zip(slash_fraction) {
-			let (stash, exposure) = &details.offender;
-
+		for (details, slash_fraction) in offenders.iter().zip(slash_fractions) {
+			let (validator, _) = &details.offender;
 			// Skip if the validator is invulnerable.
-			if invulnerables.contains(stash) {
+			if invulnerables.contains(&validator) {
+				log!(debug, "🦹 on_offence: {:?} is invulnerable; ignoring offence", validator);
 				continue
 			}
 
-			Self::deposit_event(Event::<T>::SlashReported {
-				validator: stash.clone(),
+			add_db_reads_writes(1, 0);
+			let Some(exposure_overview) = <ErasStakersOverview<T>>::get(&offence_era, validator)
+			else {
+				// defensive: this implies offence is for a discarded era, and should already be
+				// filtered out.
+				log!(
+					warn,
+					"🦹 on_offence: no exposure found for {:?} in era {}; ignoring offence",
+					validator,
+					offence_era
+				);
+				continue;
+			};
+
+			Self::deposit_event(Event::<T>::OffenceReported {
+				validator: validator.clone(),
 				fraction: *slash_fraction,
-				slash_era,
+				offence_era,
 			});
 
-			let unapplied = slashing::compute_slash::<T>(slashing::SlashParams {
-				stash,
-				slash: *slash_fraction,
-				exposure,
-				slash_era,
-				window_start,
-				now: active_era,
-				reward_proportion,
-			});
+			if offence_era == active_era.index {
+				// offence is in the current active era. Report it to session to maybe disable the
+				// validator.
+				add_db_reads_writes(2, 2);
+				T::SessionInterface::report_offence(
+					validator.clone(),
+					OffenceSeverity(*slash_fraction),
+				);
+			}
+			add_db_reads_writes(1, 0);
+			let prior_slash_fraction = ValidatorSlashInEra::<T>::get(offence_era, validator)
+				.map_or(Zero::zero(), |(f, _)| f);
 
-			if let Some(mut unapplied) = unapplied {
-				let nominators_len = unapplied.others.len() as u64;
-				let reporters_len = details.reporters.len() as u64;
+			add_db_reads_writes(1, 0);
+			if let Some(existing) = OffenceQueue::<T>::get(offence_era, validator) {
+				if slash_fraction.deconstruct() > existing.slash_fraction.deconstruct() {
+					add_db_reads_writes(0, 2);
+					OffenceQueue::<T>::insert(
+						offence_era,
+						validator,
+						OffenceRecord {
+							reporter: details.reporters.first().cloned(),
+							reported_era: active_era.index,
+							slash_fraction: *slash_fraction,
+							..existing
+						},
+					);
+
+					// update the slash fraction in the `ValidatorSlashInEra` storage.
+					ValidatorSlashInEra::<T>::insert(
+						offence_era,
+						validator,
+						(slash_fraction, exposure_overview.own),
+					);
 
-				{
-					let upper_bound = 1 /* Validator/NominatorSlashInEra */ + 2 /* fetch_spans */;
-					let rw = upper_bound + nominators_len * upper_bound;
-					add_db_reads_writes(rw, rw);
-				}
-				unapplied.reporters = details.reporters.clone();
-				if slash_defer_duration == 0 {
-					// Apply right away.
-					slashing::apply_slash::<T>(unapplied, slash_era);
-					{
-						let slash_cost = (6, 5);
-						let reward_cost = (2, 2);
-						add_db_reads_writes(
-							(1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len,
-							(1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len,
-						);
-					}
-				} else {
-					// Defer to end of some `slash_defer_duration` from now.
 					log!(
 						debug,
-						"deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}",
+						"🦹 updated slash for {:?}: {:?} (prior: {:?})",
+						validator,
 						slash_fraction,
-						slash_era,
-						active_era,
-						slash_era + slash_defer_duration + 1,
+						prior_slash_fraction,
 					);
-					UnappliedSlashes::<T>::mutate(
-						slash_era.saturating_add(slash_defer_duration).saturating_add(One::one()),
-						move |for_later| for_later.push(unapplied),
+				} else {
+					log!(
+						debug,
+						"🦹 ignored slash for {:?}: {:?} (existing prior is larger: {:?})",
+						validator,
+						slash_fraction,
+						prior_slash_fraction,
 					);
-					add_db_reads_writes(1, 1);
 				}
+			} else if slash_fraction.deconstruct() > prior_slash_fraction.deconstruct() {
+				add_db_reads_writes(0, 3);
+				ValidatorSlashInEra::<T>::insert(
+					offence_era,
+					validator,
+					(slash_fraction, exposure_overview.own),
+				);
+
+				OffenceQueue::<T>::insert(
+					offence_era,
+					validator,
+					OffenceRecord {
+						reporter: details.reporters.first().cloned(),
+						reported_era: active_era.index,
+						// there are cases of validator with no exposure, hence 0 page, so we
+						// saturate to avoid underflow.
+						exposure_page: exposure_overview.page_count.saturating_sub(1),
+						slash_fraction: *slash_fraction,
+						prior_slash_fraction,
+					},
+				);
+
+				OffenceQueueEras::<T>::mutate(|q| {
+					if let Some(eras) = q {
+						log!(debug, "🦹 inserting offence era {} into existing queue", offence_era);
+						eras.binary_search(&offence_era)
+							.err()
+							.map(|idx| eras.try_insert(idx, offence_era).defensive());
+					} else {
+						let mut eras = BoundedVec::default();
+						log!(debug, "🦹 inserting offence era {} into empty queue", offence_era);
+						let _ = eras.try_push(offence_era).defensive();
+						*q = Some(eras);
+					}
+				});
+
+				log!(
+					debug,
+					"🦹 queued slash for {:?}: {:?} (prior: {:?})",
+					validator,
+					slash_fraction,
+					prior_slash_fraction,
+				);
 			} else {
-				add_db_reads_writes(4 /* fetch_spans */, 5 /* kick_out_if_recent */)
+				log!(
+					debug,
+					"🦹 ignored slash for {:?}: {:?} (already slashed in era with prior: {:?})",
+					validator,
+					slash_fraction,
+					prior_slash_fraction,
+				);
 			}
 		}
 
@@ -2332,8 +2414,7 @@ impl<T: Config> Pallet<T> {
 		Self::check_payees()?;
 		Self::check_nominators()?;
 		Self::check_paged_exposures()?;
-		Self::check_count()?;
-		Self::ensure_disabled_validators_sorted()
+		Self::check_count()
 	}
 
 	/// Test invariants of:
@@ -2345,6 +2426,7 @@ impl<T: Config> Pallet<T> {
 	///
 	/// -- SHOULD ONLY BE CALLED AT THE END OF A GIVEN BLOCK.
 	pub fn ensure_snapshot_metadata_state(now: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
+		use sp_runtime::traits::One;
 		let next_election = Self::next_election_prediction(now);
 		let pages = Self::election_pages().saturated_into::<BlockNumberFor<T>>();
 		let election_prep_start = next_election - pages;
@@ -2665,6 +2747,7 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
+	/* todo(ank4n): move to session try runtime
 	// Sorted by index
 	fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> {
 		ensure!(
@@ -2673,4 +2756,6 @@ impl<T: Config> Pallet<T> {
 		);
 		Ok(())
 	}
+
+	 */
 }
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 7d22df148deb0811a11ad73a8407b48ec9643476..4184ac3bdde81d8e77036f5c53bc9c405d64ca36 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -47,7 +47,6 @@ use sp_runtime::{
 };
 
 use sp_staking::{
-	offence::OffenceSeverity,
 	EraIndex, Page, SessionIndex,
 	StakingAccount::{self, Controller, Stash},
 	StakingInterface,
@@ -58,11 +57,10 @@ mod impls;
 pub use impls::*;
 
 use crate::{
-	asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf,
-	DisablingStrategy, EraPayout, EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState,
-	MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf,
-	RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
-	ValidatorPrefs,
+	asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
+	EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf,
+	NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination,
+	SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs,
 };
 
 // The speculative number of spans are used as an input of the weight annotation of
@@ -78,7 +76,7 @@ pub mod pallet {
 	use frame_election_provider_support::{ElectionDataProvider, PageIndex};
 
 	/// The in-code storage version.
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(16);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(17);
 
 	#[pallet::pallet]
 	#[pallet::storage_version(STORAGE_VERSION)]
@@ -325,10 +323,6 @@ pub mod pallet {
 		#[pallet::no_default_bounds]
 		type EventListeners: sp_staking::OnStakingUpdate<Self::AccountId, BalanceOf<Self>>;
 
-		/// `DisablingStragegy` controls how validators are disabled
-		#[pallet::no_default_bounds]
-		type DisablingStrategy: DisablingStrategy<Self>;
-
 		/// Maximum number of invulnerable validators.
 		#[pallet::constant]
 		type MaxInvulnerables: Get<u32>;
@@ -396,7 +390,6 @@ pub mod pallet {
 			type MaxInvulnerables = ConstU32<20>;
 			type MaxDisabledValidators = ConstU32<100>;
 			type EventListeners = ();
-			type DisablingStrategy = crate::UpToLimitDisablingStrategy;
 			#[cfg(feature = "std")]
 			type BenchmarkingConfig = crate::TestBenchmarkingConfig;
 			type WeightInfo = ();
@@ -645,15 +638,67 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type CanceledSlashPayout<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>;
 
+	/// Stores reported offences in a queue until they are processed in subsequent blocks.
+	///
+	/// Each offence is recorded under the corresponding era index and the offending validator's
+	/// account. If an offence spans multiple pages, only one page is processed at a time. Offences
+	/// are handled sequentially, with their associated slashes computed and stored in
+	/// `UnappliedSlashes`. These slashes are then applied in a future era as determined by
+	/// `SlashDeferDuration`.
+	///
+	/// Any offences tied to an era older than `BondingDuration` are automatically dropped.
+	/// Processing always prioritizes the oldest era first.
+	#[pallet::storage]
+	pub type OffenceQueue<T: Config> = StorageDoubleMap<
+		_,
+		Twox64Concat,
+		EraIndex,
+		Twox64Concat,
+		T::AccountId,
+		slashing::OffenceRecord<T::AccountId>,
+	>;
+
+	/// Tracks the eras that contain offences in `OffenceQueue`, sorted from **earliest to latest**.
+	///
+	/// - This ensures efficient retrieval of the oldest offence without iterating through
+	/// `OffenceQueue`.
+	/// - When a new offence is added to `OffenceQueue`, its era is **inserted in sorted order**
+	/// if not already present.
+	/// - When all offences for an era are processed, it is **removed** from this list.
+	/// - The maximum length of this vector is bounded by `BondingDuration`.
+	///
+	/// This eliminates the need for expensive iteration and sorting when fetching the next offence
+	/// to process.
+	#[pallet::storage]
+	pub type OffenceQueueEras<T: Config> = StorageValue<_, BoundedVec<u32, T::BondingDuration>>;
+
+	/// Tracks the currently processed offence record from the `OffenceQueue`.
+	///
+	/// - When processing offences, an offence record is **popped** from the oldest era in
+	///   `OffenceQueue` and stored here.
+	/// - The function `process_offence` reads from this storage, processing one page of exposure at
+	///   a time.
+	/// - After processing a page, the `exposure_page` count is **decremented** until it reaches
+	///   zero.
+	/// - Once fully processed, the offence record is removed from this storage.
+	///
+	/// This ensures that offences are processed incrementally, preventing excessive computation
+	/// in a single block while maintaining correct slashing behavior.
+	#[pallet::storage]
+	pub type ProcessingOffence<T: Config> =
+		StorageValue<_, (EraIndex, T::AccountId, slashing::OffenceRecord<T::AccountId>)>;
+
 	/// All unapplied slashes that are queued for later.
 	#[pallet::storage]
-	#[pallet::unbounded]
-	pub type UnappliedSlashes<T: Config> = StorageMap<
+	pub type UnappliedSlashes<T: Config> = StorageDoubleMap<
 		_,
 		Twox64Concat,
 		EraIndex,
-		Vec<UnappliedSlash<T::AccountId, BalanceOf<T>>>,
-		ValueQuery,
+		Twox64Concat,
+		// Unique key for unapplied slashes: (validator, slash fraction, page index).
+		(T::AccountId, Perbill, u32),
+		UnappliedSlash<T>,
+		OptionQuery,
 	>;
 
 	/// A mapping from still-bonded eras to the first session index of that era.
@@ -705,20 +750,6 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type CurrentPlannedSession<T> = StorageValue<_, SessionIndex, ValueQuery>;
 
-	/// Indices of validators that have offended in the active era. The offenders are disabled for a
-	/// whole era. For this reason they are kept here - only staking pallet knows about eras. The
-	/// implementor of [`DisablingStrategy`] defines if a validator should be disabled which
-	/// implicitly means that the implementor also controls the max number of disabled validators.
-	///
-	/// The vec is always kept sorted based on the u32 index so that we can find whether a given
-	/// validator has previously offended using binary search.
-	///
-	/// Additionally, each disabled validator is associated with an `OffenceSeverity` which
-	/// represents how severe is the offence that got the validator disabled.
-	#[pallet::storage]
-	pub type DisabledValidators<T: Config> =
-		StorageValue<_, BoundedVec<(u32, OffenceSeverity), T::MaxDisabledValidators>, ValueQuery>;
-
 	/// The threshold for when users can start calling `chill_other` for other validators /
 	/// nominators. The threshold is compared to the actual number of validators / nominators
 	/// (`CountFor*`) in the system compared to the configured max (`Max*Count`).
@@ -936,13 +967,6 @@ pub mod pallet {
 			staker: T::AccountId,
 			amount: BalanceOf<T>,
 		},
-		/// A slash for the given validator, for the given percentage of their stake, at the given
-		/// era as been reported.
-		SlashReported {
-			validator: T::AccountId,
-			fraction: Perbill,
-			slash_era: EraIndex,
-		},
 		/// An old slashing report from a prior era was discarded because it could
 		/// not be processed.
 		OldSlashingReportDiscarded {
@@ -1007,14 +1031,6 @@ pub mod pallet {
 		ControllerBatchDeprecated {
 			failures: u32,
 		},
-		/// Validator has been disabled.
-		ValidatorDisabled {
-			stash: T::AccountId,
-		},
-		/// Validator has been re-enabled.
-		ValidatorReenabled {
-			stash: T::AccountId,
-		},
 		/// Staking balance migrated from locks to holds, with any balance that could not be held
 		/// is force withdrawn.
 		CurrencyMigrated {
@@ -1034,6 +1050,26 @@ pub mod pallet {
 			page: PageIndex,
 			result: Result<u32, u32>,
 		},
+		/// An offence for the given validator, for the given percentage of their stake, at the
+		/// given era as been reported.
+		OffenceReported {
+			offence_era: EraIndex,
+			validator: T::AccountId,
+			fraction: Perbill,
+		},
+		/// An offence has been processed and the corresponding slash has been computed.
+		SlashComputed {
+			offence_era: EraIndex,
+			slash_era: EraIndex,
+			offender: T::AccountId,
+			page: u32,
+		},
+		/// An unapplied slash has been cancelled.
+		SlashCancelled {
+			slash_era: EraIndex,
+			slash_key: (T::AccountId, Perbill, u32),
+			payout: BalanceOf<T>,
+		},
 	}
 
 	#[pallet::error]
@@ -1051,8 +1087,8 @@ pub mod pallet {
 		EmptyTargets,
 		/// Duplicate index.
 		DuplicateIndex,
-		/// Slash record index out of bounds.
-		InvalidSlashIndex,
+		/// Slash record not found.
+		InvalidSlashRecord,
 		/// Cannot have a validator or nominator role, with value less than the minimum defined by
 		/// governance (see `MinValidatorBond` and `MinNominatorBond`). If unbonding is the
 		/// intention, `chill` first to remove one's role as validator/nominator.
@@ -1067,8 +1103,6 @@ pub mod pallet {
 		InvalidEraToReward,
 		/// Invalid number of nominations.
 		InvalidNumberOfNominations,
-		/// Items are not sorted and unique.
-		NotSortedAndUnique,
 		/// Rewards for this era have already been claimed for this validator.
 		AlreadyClaimed,
 		/// No nominators exist on this page.
@@ -1109,6 +1143,8 @@ pub mod pallet {
 		CannotReapStash,
 		/// The stake of this account is already migrated to `Fungible` holds.
 		AlreadyMigrated,
+		/// Era not yet started.
+		EraNotStarted,
 	}
 
 	#[pallet::hooks]
@@ -1117,6 +1153,21 @@ pub mod pallet {
 		/// that the `ElectableStashes` has been populated with all validators from all pages at
 		/// the time of the election.
 		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
+			// todo(ank4n): Hacky bench. Do it properly.
+			let mut consumed_weight = slashing::process_offence::<T>();
+
+			consumed_weight.saturating_accrue(T::DbWeight::get().reads(1));
+			if let Some(active_era) = ActiveEra::<T>::get() {
+				let max_slash_page_size = T::MaxExposurePageSize::get();
+				consumed_weight.saturating_accrue(
+					T::DbWeight::get().reads_writes(
+						3 * max_slash_page_size as u64,
+						3 * max_slash_page_size as u64,
+					),
+				);
+				Self::apply_unapplied_slashes(active_era.index);
+			}
+
 			let pages = Self::election_pages();
 
 			// election ongoing, fetch the next page.
@@ -1144,7 +1195,9 @@ pub mod pallet {
 				}
 			};
 
-			T::WeightInfo::on_initialize_noop().saturating_add(inner_weight)
+			consumed_weight.saturating_accrue(inner_weight);
+
+			consumed_weight
 		}
 
 		fn on_finalize(_n: BlockNumberFor<T>) {
@@ -1907,33 +1960,35 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Cancel enactment of a deferred slash.
+		/// Cancels scheduled slashes for a given era before they are applied.
 		///
-		/// Can be called by the `T::AdminOrigin`.
+		/// This function allows `T::AdminOrigin` to selectively remove pending slashes from
+		/// the `UnappliedSlashes` storage, preventing their enactment.
 		///
-		/// Parameters: era and indices of the slashes for that era to kill.
+		/// ## Parameters
+		/// - `era`: The staking era for which slashes were deferred.
+		/// - `slash_keys`: A list of slash keys identifying the slashes to remove. This is a tuple
+		/// of `(stash, slash_fraction, page_index)`.
 		#[pallet::call_index(17)]
-		#[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))]
+		#[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_keys.len() as u32))]
 		pub fn cancel_deferred_slash(
 			origin: OriginFor<T>,
 			era: EraIndex,
-			slash_indices: Vec<u32>,
+			slash_keys: Vec<(T::AccountId, Perbill, u32)>,
 		) -> DispatchResult {
 			T::AdminOrigin::ensure_origin(origin)?;
-
-			ensure!(!slash_indices.is_empty(), Error::<T>::EmptyTargets);
-			ensure!(is_sorted_and_unique(&slash_indices), Error::<T>::NotSortedAndUnique);
-
-			let mut unapplied = UnappliedSlashes::<T>::get(&era);
-			let last_item = slash_indices[slash_indices.len() - 1];
-			ensure!((last_item as usize) < unapplied.len(), Error::<T>::InvalidSlashIndex);
-
-			for (removed, index) in slash_indices.into_iter().enumerate() {
-				let index = (index as usize) - removed;
-				unapplied.remove(index);
-			}
-
-			UnappliedSlashes::<T>::insert(&era, &unapplied);
+			ensure!(!slash_keys.is_empty(), Error::<T>::EmptyTargets);
+
+			// Remove the unapplied slashes.
+			slash_keys.into_iter().for_each(|i| {
+				UnappliedSlashes::<T>::take(&era, &i).map(|unapplied_slash| {
+					Self::deposit_event(Event::<T>::SlashCancelled {
+						slash_era: era,
+						slash_key: i,
+						payout: unapplied_slash.payout,
+					});
+				});
+			});
 			Ok(())
 		}
 
@@ -2494,10 +2549,44 @@ pub mod pallet {
 			// Refund the transaction fee if successful.
 			Ok(Pays::No.into())
 		}
-	}
-}
 
-/// Check that list is sorted and has no duplicates.
-fn is_sorted_and_unique(list: &[u32]) -> bool {
-	list.windows(2).all(|w| w[0] < w[1])
+		/// Manually applies a deferred slash for a given era.
+		///
+		/// Normally, slashes are automatically applied shortly after the start of the `slash_era`.
+		/// This function exists as a **fallback mechanism** in case slashes were not applied due to
+		/// unexpected reasons. It allows anyone to manually apply an unapplied slash.
+		///
+		/// ## Parameters
+		/// - `slash_era`: The staking era in which the slash was originally scheduled.
+		/// - `slash_key`: A unique identifier for the slash, represented as a tuple:
+		///   - `stash`: The stash account of the validator being slashed.
+		///   - `slash_fraction`: The fraction of the stake that was slashed.
+		///   - `page_index`: The index of the exposure page being processed.
+		///
+		/// ## Behavior
+		/// - The function is **permissionless**—anyone can call it.
+		/// - The `slash_era` **must be the current era or a past era**. If it is in the future, the
+		///   call fails with `EraNotStarted`.
+		/// - The fee is waived if the slash is successfully applied.
+		///
+		/// ## TODO: Future Improvement
+		/// - Implement an **off-chain worker (OCW) task** to automatically apply slashes when there
+		///   is unused block space, improving efficiency.
+		#[pallet::call_index(31)]
+		#[pallet::weight(T::WeightInfo::apply_slash())]
+		pub fn apply_slash(
+			origin: OriginFor<T>,
+			slash_era: EraIndex,
+			slash_key: (T::AccountId, Perbill, u32),
+		) -> DispatchResultWithPostInfo {
+			let _ = ensure_signed(origin)?;
+			let active_era = ActiveEra::<T>::get().map(|a| a.index).unwrap_or_default();
+			ensure!(slash_era <= active_era, Error::<T>::EraNotStarted);
+			let unapplied_slash = UnappliedSlashes::<T>::take(&slash_era, &slash_key)
+				.ok_or(Error::<T>::InvalidSlashRecord)?;
+			slashing::apply_slash::<T>(unapplied_slash, slash_era);
+
+			Ok(Pays::No.into())
+		}
+	}
 }
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index 98a6424fe7ac6cba720cebe46c46990da52921e0..30d4197a888a706570e367bcfaf2a5d9c6418c32 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -50,22 +50,22 @@
 //! Based on research at <https://research.web3.foundation/en/latest/polkadot/slashing/npos.html>
 
 use crate::{
-	asset, BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure,
-	NegativeImbalanceOf, NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash,
-	UnappliedSlash, ValidatorSlashInEra,
+	asset, log, BalanceOf, Config, EraInfo, Error, NegativeImbalanceOf, NominatorSlashInEra,
+	OffenceQueue, OffenceQueueEras, PagedExposure, Pallet, Perbill, ProcessingOffence,
+	SlashRewardFraction, SpanSlash, UnappliedSlash, UnappliedSlashes, ValidatorSlashInEra,
 };
 use alloc::vec::Vec;
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
 	ensure,
-	traits::{Defensive, DefensiveSaturating, Imbalance, OnUnbalanced},
+	traits::{Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced},
 };
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{Saturating, Zero},
-	DispatchResult, RuntimeDebug,
+	DispatchResult, RuntimeDebug, WeakBoundedVec, Weight,
 };
-use sp_staking::{offence::OffenceSeverity, EraIndex, StakingInterface};
+use sp_staking::{EraIndex, StakingInterface};
 
 /// The proportion of the slashing reward to be paid out on the first slashing detection.
 /// This is f_1 in the paper.
@@ -209,8 +209,12 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> {
 	pub(crate) stash: &'a T::AccountId,
 	/// The proportion of the slash.
 	pub(crate) slash: Perbill,
+	/// The prior slash proportion of the validator if the validator has been reported multiple
+	/// times in the same era, and a new greater slash replaces the old one.
+	/// Invariant: slash > prior_slash
+	pub(crate) prior_slash: Perbill,
 	/// The exposure of the stash and all nominators.
-	pub(crate) exposure: &'a Exposure<T::AccountId, BalanceOf<T>>,
+	pub(crate) exposure: &'a PagedExposure<T::AccountId, BalanceOf<T>>,
 	/// The era where the offence occurred.
 	pub(crate) slash_era: EraIndex,
 	/// The first era in the current bonding period.
@@ -222,78 +226,248 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> {
 	pub(crate) reward_proportion: Perbill,
 }
 
-/// Computes a slash of a validator and nominators. It returns an unapplied
-/// record to be applied at some later point. Slashing metadata is updated in storage,
-/// since unapplied records are only rarely intended to be dropped.
+/// Represents an offence record within the staking system, capturing details about a slashing
+/// event.
+#[derive(Clone, Encode, Decode, TypeInfo, MaxEncodedLen, PartialEq, RuntimeDebug)]
+pub struct OffenceRecord<AccountId> {
+	/// The account ID of the entity that reported the offence.
+	pub reporter: Option<AccountId>,
+
+	/// Era at which the offence was reported.
+	pub reported_era: EraIndex,
+
+	/// The specific page of the validator's exposure currently being processed.
+	///
+	/// Since a validator's total exposure can span multiple pages, this field serves as a pointer
+	/// to the current page being evaluated. The processing order starts from the last page
+	/// and moves backward, decrementing this value with each processed page.
+	///
+	/// This ensures that all pages are systematically handled, and it helps track when
+	/// the entire exposure has been processed.
+	pub exposure_page: u32,
+
+	/// The fraction of the validator's stake to be slashed for this offence.
+	pub slash_fraction: Perbill,
+
+	/// The previous slash fraction of the validator's stake before being updated.
+	/// If a new, higher slash fraction is reported, this field stores the prior fraction
+	/// that was overwritten. This helps in tracking changes in slashes across multiple reports for
+	/// the same era.
+	pub prior_slash_fraction: Perbill,
+}
+
+/// Loads next offence in the processing offence and returns the offense record to be processed.
 ///
-/// The pending slash record returned does not have initialized reporters. Those have
-/// to be set at a higher level, if any.
-pub(crate) fn compute_slash<T: Config>(
-	params: SlashParams<T>,
-) -> Option<UnappliedSlash<T::AccountId, BalanceOf<T>>> {
-	let mut reward_payout = Zero::zero();
-	let mut val_slashed = Zero::zero();
+/// Note: this can mutate the following storage
+/// - `ProcessingOffence`
+/// - `OffenceQueue`
+/// - `OffenceQueueEras`
+fn next_offence<T: Config>() -> Option<(EraIndex, T::AccountId, OffenceRecord<T::AccountId>)> {
+	let processing_offence = ProcessingOffence::<T>::get();
+
+	if let Some((offence_era, offender, offence_record)) = processing_offence {
+		// If the exposure page is 0, then the offence has been processed.
+		if offence_record.exposure_page == 0 {
+			ProcessingOffence::<T>::kill();
+			return Some((offence_era, offender, offence_record))
+		}
 
-	// is the slash amount here a maximum for the era?
-	let own_slash = params.slash * params.exposure.own;
-	if params.slash * params.exposure.total == Zero::zero() {
-		// kick out the validator even if they won't be slashed,
-		// as long as the misbehavior is from their most recent slashing span.
-		kick_out_if_recent::<T>(params);
-		return None
+		// Update the next page.
+		ProcessingOffence::<T>::put((
+			offence_era,
+			&offender,
+			OffenceRecord {
+				// decrement the page index.
+				exposure_page: offence_record.exposure_page.defensive_saturating_sub(1),
+				..offence_record.clone()
+			},
+		));
+
+		return Some((offence_era, offender, offence_record))
 	}
 
-	let prior_slash_p = ValidatorSlashInEra::<T>::get(&params.slash_era, params.stash)
-		.map_or(Zero::zero(), |(prior_slash_proportion, _)| prior_slash_proportion);
+	// Nothing in processing offence. Try to enqueue the next offence.
+	let Some(mut eras) = OffenceQueueEras::<T>::get() else { return None };
+	let Some(&oldest_era) = eras.first() else { return None };
+
+	let mut offence_iter = OffenceQueue::<T>::iter_prefix(oldest_era);
+	let next_offence = offence_iter.next();
+
+	if let Some((ref validator, ref offence_record)) = next_offence {
+		// Update the processing offence if the offence is multi-page.
+		if offence_record.exposure_page > 0 {
+			// update processing offence with the next page.
+			ProcessingOffence::<T>::put((
+				oldest_era,
+				validator.clone(),
+				OffenceRecord {
+					exposure_page: offence_record.exposure_page.defensive_saturating_sub(1),
+					..offence_record.clone()
+				},
+			));
+		}
 
-	// compare slash proportions rather than slash values to avoid issues due to rounding
-	// error.
-	if params.slash.deconstruct() > prior_slash_p.deconstruct() {
-		ValidatorSlashInEra::<T>::insert(
-			&params.slash_era,
-			params.stash,
-			&(params.slash, own_slash),
-		);
-	} else {
-		// we slash based on the max in era - this new event is not the max,
-		// so neither the validator or any nominators will need an update.
-		//
-		// this does lead to a divergence of our system from the paper, which
-		// pays out some reward even if the latest report is not max-in-era.
-		// we opt to avoid the nominator lookups and edits and leave more rewards
-		// for more drastic misbehavior.
-		return None
+		// Remove from `OffenceQueue`
+		OffenceQueue::<T>::remove(oldest_era, &validator);
 	}
 
-	// apply slash to validator.
-	{
-		let mut spans = fetch_spans::<T>(
-			params.stash,
-			params.window_start,
-			&mut reward_payout,
-			&mut val_slashed,
-			params.reward_proportion,
+	// If there are no offences left for the era, remove the era from `OffenceQueueEras`.
+	if offence_iter.next().is_none() {
+		if eras.len() == 1 {
+			// If there is only one era left, remove the entire queue.
+			OffenceQueueEras::<T>::kill();
+		} else {
+			// Remove the oldest era
+			eras.remove(0);
+			OffenceQueueEras::<T>::put(eras);
+		}
+	}
+
+	next_offence.map(|(v, o)| (oldest_era, v, o))
+}
+
+/// Infallible function to process an offence.
+pub(crate) fn process_offence<T: Config>() -> Weight {
+	// todo(ank4n): this needs to be properly benched.
+	let mut consumed_weight = Weight::from_parts(0, 0);
+	let mut add_db_reads_writes = |reads, writes| {
+		consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
+	};
+
+	add_db_reads_writes(1, 1);
+	let Some((offence_era, offender, offence_record)) = next_offence::<T>() else {
+		return consumed_weight
+	};
+
+	log!(
+		debug,
+		"🦹 Processing offence for {:?} in era {:?} with slash fraction {:?}",
+		offender,
+		offence_era,
+		offence_record.slash_fraction,
+	);
+
+	add_db_reads_writes(1, 0);
+	let reward_proportion = SlashRewardFraction::<T>::get();
+
+	add_db_reads_writes(2, 0);
+	let Some(exposure) =
+		EraInfo::<T>::get_paged_exposure(offence_era, &offender, offence_record.exposure_page)
+	else {
+		// this can only happen if the offence was valid at the time of reporting but became too old
+		// at the time of computing and should be discarded.
+		return consumed_weight
+	};
+
+	let slash_page = offence_record.exposure_page;
+	let slash_defer_duration = T::SlashDeferDuration::get();
+	let slash_era = offence_era.saturating_add(slash_defer_duration);
+	let window_start = offence_record.reported_era.saturating_sub(T::BondingDuration::get());
+
+	add_db_reads_writes(3, 3);
+	let Some(mut unapplied) = compute_slash::<T>(SlashParams {
+		stash: &offender,
+		slash: offence_record.slash_fraction,
+		prior_slash: offence_record.prior_slash_fraction,
+		exposure: &exposure,
+		slash_era: offence_era,
+		window_start,
+		now: offence_record.reported_era,
+		reward_proportion,
+	}) else {
+		log!(
+			debug,
+			"🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is discarded, as could not compute slash",
+			offence_record.slash_fraction,
+			offence_era,
+			offence_record.reported_era,
 		);
+		// No slash to apply. Discard.
+		return consumed_weight
+	};
 
-		let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash);
+	<Pallet<T>>::deposit_event(super::Event::<T>::SlashComputed {
+		offence_era,
+		slash_era,
+		offender: offender.clone(),
+		page: slash_page,
+	});
 
-		if target_span == Some(spans.span_index()) {
-			// misbehavior occurred within the current slashing span - end current span.
-			// Check <https://github.com/paritytech/polkadot-sdk/issues/2650> for details.
-			spans.end_span(params.now);
-		}
+	log!(
+		debug,
+		"🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is computed",
+		offence_record.slash_fraction,
+		offence_era,
+		offence_record.reported_era,
+	);
+
+	// add the reporter to the unapplied slash.
+	unapplied.reporter = offence_record.reporter;
+
+	if slash_defer_duration == 0 {
+		// Apply right away.
+		log!(
+			debug,
+			"🦹 applying slash instantly of {:?}% happened in {:?} (reported in {:?}) to {:?}",
+			offence_record.slash_fraction,
+			offence_era,
+			offence_record.reported_era,
+			offender,
+		);
+
+		let accounts_slashed = unapplied.others.len() as u64 + 1;
+		add_db_reads_writes(3 * accounts_slashed, 3 * accounts_slashed);
+		apply_slash::<T>(unapplied, offence_era);
+	} else {
+		// Historical Note: Previously, with BondingDuration = 28 and SlashDeferDuration = 27,
+		// slashes were applied at the start of the 28th era from `offence_era`.
+		// However, with paged slashing, applying slashes now takes multiple blocks.
+		// To account for this delay, slashes are now applied at the start of the 27th era from
+		// `offence_era`.
+		log!(
+			debug,
+			"🦹 deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}",
+			offence_record.slash_fraction,
+			offence_era,
+			offence_record.reported_era,
+			slash_era,
+		);
+
+		add_db_reads_writes(0, 1);
+		UnappliedSlashes::<T>::insert(
+			slash_era,
+			(offender, offence_record.slash_fraction, slash_page),
+			unapplied,
+		);
 	}
 
-	add_offending_validator::<T>(&params);
+	consumed_weight
+}
+
+/// Computes a slash of a validator and nominators. It returns an unapplied
+/// record to be applied at some later point. Slashing metadata is updated in storage,
+/// since unapplied records are only rarely intended to be dropped.
+///
+/// The pending slash record returned does not have initialized reporters. Those have
+/// to be set at a higher level, if any.
+///
+/// If `nomintors_only` is set to `true`, only the nominator slashes will be computed.
+pub(crate) fn compute_slash<T: Config>(params: SlashParams<T>) -> Option<UnappliedSlash<T>> {
+	let (val_slashed, mut reward_payout) = slash_validator::<T>(params.clone());
 
 	let mut nominators_slashed = Vec::new();
-	reward_payout += slash_nominators::<T>(params.clone(), prior_slash_p, &mut nominators_slashed);
+	let (nom_slashed, nom_reward_payout) =
+		slash_nominators::<T>(params.clone(), &mut nominators_slashed);
+	reward_payout += nom_reward_payout;
 
-	Some(UnappliedSlash {
+	(nom_slashed + val_slashed > Zero::zero()).then_some(UnappliedSlash {
 		validator: params.stash.clone(),
 		own: val_slashed,
-		others: nominators_slashed,
-		reporters: Vec::new(),
+		others: WeakBoundedVec::force_from(
+			nominators_slashed,
+			Some("slashed nominators not expected to be larger than the bounds"),
+		),
+		reporter: None,
 		payout: reward_payout,
 	})
 }
@@ -316,82 +490,72 @@ fn kick_out_if_recent<T: Config>(params: SlashParams<T>) {
 		// Check https://github.com/paritytech/polkadot-sdk/issues/2650 for details
 		spans.end_span(params.now);
 	}
-
-	add_offending_validator::<T>(&params);
 }
 
-/// Inform the [`DisablingStrategy`] implementation about the new offender and disable the list of
-/// validators provided by [`decision`].
-fn add_offending_validator<T: Config>(params: &SlashParams<T>) {
-	DisabledValidators::<T>::mutate(|disabled| {
-		let new_severity = OffenceSeverity(params.slash);
-		let decision =
-			T::DisablingStrategy::decision(params.stash, new_severity, params.slash_era, &disabled);
-
-		if let Some(offender_idx) = decision.disable {
-			// Check if the offender is already disabled
-			match disabled.binary_search_by_key(&offender_idx, |(index, _)| *index) {
-				// Offender is already disabled, update severity if the new one is higher
-				Ok(index) => {
-					let (_, old_severity) = &mut disabled[index];
-					if new_severity > *old_severity {
-						*old_severity = new_severity;
-					}
-				},
-				Err(index) => {
-					// Offender is not disabled, add to `DisabledValidators` and disable it
-					if disabled.try_insert(index, (offender_idx, new_severity)).defensive().is_ok()
-					{
-						// Propagate disablement to session level
-						T::SessionInterface::disable_validator(offender_idx);
-						// Emit event that a validator got disabled
-						<Pallet<T>>::deposit_event(super::Event::<T>::ValidatorDisabled {
-							stash: params.stash.clone(),
-						});
-					}
-				},
-			}
-		}
+/// Compute the slash for a validator. Returns the amount slashed and the reward payout.
+fn slash_validator<T: Config>(params: SlashParams<T>) -> (BalanceOf<T>, BalanceOf<T>) {
+	let own_slash = params.slash * params.exposure.exposure_metadata.own;
+	log!(
+		warn,
+		"🦹 slashing validator {:?} of stake: {:?} with {:?}% for {:?} in era {:?}",
+		params.stash,
+		params.exposure.exposure_metadata.own,
+		params.slash,
+		own_slash,
+		params.slash_era,
+	);
 
-		if let Some(reenable_idx) = decision.reenable {
-			// Remove the validator from `DisabledValidators` and re-enable it.
-			if let Ok(index) = disabled.binary_search_by_key(&reenable_idx, |(index, _)| *index) {
-				disabled.remove(index);
-				// Propagate re-enablement to session level
-				T::SessionInterface::enable_validator(reenable_idx);
-				// Emit event that a validator got re-enabled
-				let reenabled_stash =
-					T::SessionInterface::validators()[reenable_idx as usize].clone();
-				<Pallet<T>>::deposit_event(super::Event::<T>::ValidatorReenabled {
-					stash: reenabled_stash,
-				});
-			}
+	if own_slash == Zero::zero() {
+		// kick out the validator even if they won't be slashed,
+		// as long as the misbehavior is from their most recent slashing span.
+		kick_out_if_recent::<T>(params);
+		return (Zero::zero(), Zero::zero())
+	}
+
+	// apply slash to validator.
+	let mut reward_payout = Zero::zero();
+	let mut val_slashed = Zero::zero();
+
+	{
+		let mut spans = fetch_spans::<T>(
+			params.stash,
+			params.window_start,
+			&mut reward_payout,
+			&mut val_slashed,
+			params.reward_proportion,
+		);
+
+		let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash);
+
+		if target_span == Some(spans.span_index()) {
+			// misbehavior occurred within the current slashing span - end current span.
+			// Check <https://github.com/paritytech/polkadot-sdk/issues/2650> for details.
+			spans.end_span(params.now);
 		}
-	});
+	}
 
-	// `DisabledValidators` should be kept sorted
-	debug_assert!(DisabledValidators::<T>::get().windows(2).all(|pair| pair[0] < pair[1]));
+	(val_slashed, reward_payout)
 }
 
 /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator.
 ///
-/// Returns the amount of reward to pay out.
+/// Returns the total amount slashed and amount of reward to pay out.
 fn slash_nominators<T: Config>(
 	params: SlashParams<T>,
-	prior_slash_p: Perbill,
 	nominators_slashed: &mut Vec<(T::AccountId, BalanceOf<T>)>,
-) -> BalanceOf<T> {
-	let mut reward_payout = Zero::zero();
+) -> (BalanceOf<T>, BalanceOf<T>) {
+	let mut reward_payout = BalanceOf::<T>::zero();
+	let mut total_slashed = BalanceOf::<T>::zero();
 
-	nominators_slashed.reserve(params.exposure.others.len());
-	for nominator in &params.exposure.others {
+	nominators_slashed.reserve(params.exposure.exposure_page.others.len());
+	for nominator in &params.exposure.exposure_page.others {
 		let stash = &nominator.who;
 		let mut nom_slashed = Zero::zero();
 
-		// the era slash of a nominator always grows, if the validator
-		// had a new max slash for the era.
+		// the era slash of a nominator always grows, if the validator had a new max slash for the
+		// era.
 		let era_slash = {
-			let own_slash_prior = prior_slash_p * nominator.value;
+			let own_slash_prior = params.prior_slash * nominator.value;
 			let own_slash_by_validator = params.slash * nominator.value;
 			let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior);
 
@@ -421,9 +585,10 @@ fn slash_nominators<T: Config>(
 			}
 		}
 		nominators_slashed.push((stash.clone(), nom_slashed));
+		total_slashed.saturating_accrue(nom_slashed);
 	}
 
-	reward_payout
+	(total_slashed, reward_payout)
 }
 
 // helper struct for managing a set of spans we are currently inspecting.
@@ -637,22 +802,25 @@ pub fn do_slash<T: Config>(
 }
 
 /// Apply a previously-unapplied slash.
-pub(crate) fn apply_slash<T: Config>(
-	unapplied_slash: UnappliedSlash<T::AccountId, BalanceOf<T>>,
-	slash_era: EraIndex,
-) {
+pub(crate) fn apply_slash<T: Config>(unapplied_slash: UnappliedSlash<T>, slash_era: EraIndex) {
 	let mut slashed_imbalance = NegativeImbalanceOf::<T>::zero();
 	let mut reward_payout = unapplied_slash.payout;
 
-	do_slash::<T>(
-		&unapplied_slash.validator,
-		unapplied_slash.own,
-		&mut reward_payout,
-		&mut slashed_imbalance,
-		slash_era,
-	);
+	if unapplied_slash.own > Zero::zero() {
+		do_slash::<T>(
+			&unapplied_slash.validator,
+			unapplied_slash.own,
+			&mut reward_payout,
+			&mut slashed_imbalance,
+			slash_era,
+		);
+	}
 
 	for &(ref nominator, nominator_slash) in &unapplied_slash.others {
+		if nominator_slash.is_zero() {
+			continue
+		}
+
 		do_slash::<T>(
 			nominator,
 			nominator_slash,
@@ -662,7 +830,11 @@ pub(crate) fn apply_slash<T: Config>(
 		);
 	}
 
-	pay_reporters::<T>(reward_payout, slashed_imbalance, &unapplied_slash.reporters);
+	pay_reporters::<T>(
+		reward_payout,
+		slashed_imbalance,
+		&unapplied_slash.reporter.map(|v| crate::vec![v]).unwrap_or_default(),
+	);
 }
 
 /// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance.
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index 8fe3c8f17751ca28b8056517b508fbca567ac2b6..64639648073ba5cb28ebdece09828305bfb92dbf 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -34,9 +34,14 @@ use frame_support::{
 	},
 	BoundedVec,
 };
-
 use mock::*;
 use pallet_balances::Error as BalancesError;
+use pallet_session::{
+	disabling::{
+		DisablingStrategy, UpToLimitDisablingStrategy, UpToLimitWithReEnablingDisablingStrategy,
+	},
+	Event as SessionEvent,
+};
 use sp_runtime::{
 	assert_eq_error_rate, bounded_vec,
 	traits::{BadOrigin, Dispatchable},
@@ -44,7 +49,7 @@ use sp_runtime::{
 };
 use sp_staking::{
 	offence::{OffenceDetails, OnOffenceHandler},
-	SessionIndex,
+	SessionIndex, StakingInterface,
 };
 use substrate_test_utils::assert_eq_uvec;
 
@@ -748,10 +753,7 @@ fn nominators_also_get_slashed_pro_rata() {
 			let exposed_nominator = initial_exposure.others.first().unwrap().value;
 
 			// 11 goes offline
-			on_offence_now(
-				&[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }],
-				&[slash_percent],
-			);
+			on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 			// both stakes must have been decreased.
 			assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake);
@@ -2448,13 +2450,7 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		);
 
 		// Check slashing
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(100)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), stake - 1);
 		assert_eq!(asset::stakeable_balance::<Test>(&2), 1);
@@ -2547,13 +2543,7 @@ fn era_is_always_same_length() {
 #[test]
 fn offence_doesnt_force_new_era() {
 	ExtBuilder::default().build_and_execute(|| {
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(5)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true);
 
 		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 	});
@@ -2565,13 +2555,7 @@ fn offence_ensures_new_era_without_clobbering() {
 		assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root()));
 		assert_eq!(ForceEra::<Test>::get(), Forcing::ForceAlways);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(5)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true);
 
 		assert_eq!(ForceEra::<Test>::get(), Forcing::ForceAlways);
 	});
@@ -2589,13 +2573,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() {
 			assert!(Session::validators().contains(&11));
 			assert!(<Validators<Test>>::contains_key(11));
 
-			on_offence_now(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(0)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 			assert!(is_disabled(11));
@@ -2615,16 +2593,10 @@ fn slashing_performed_according_exposure() {
 		assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000);
 
 		// Handle an offence with a historical exposure.
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Exposure { total: 500, own: 500, others: vec![] }),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(50)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 		// The stash account should be slashed for 250 (50% of 500).
-		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000 - 250);
+		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000 / 2);
 	});
 }
 
@@ -2639,13 +2611,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 			assert!(<Validators<Test>>::contains_key(11));
 			assert!(Session::validators().contains(&11));
 
-			on_offence_now(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(0)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 			assert!(is_disabled(11));
@@ -2661,14 +2627,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 			mock::start_active_era(3);
 
 			// an offence committed in era 1 is reported in era 3
-			on_offence_in_era(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(0)],
-				1,
-			);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1, true);
 
 			// the validator doesn't get disabled for an old offence
 			assert!(Validators::<Test>::iter().any(|(stash, _)| stash == 11));
@@ -2678,13 +2637,11 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
 			on_offence_in_era(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
+				&[offence_from(11, None)],
 				// NOTE: A 100% slash here would clean up the account, causing de-registration.
 				&[Perbill::from_percent(95)],
 				1,
+				true,
 			);
 
 			// the validator doesn't get disabled again
@@ -2696,9 +2653,9 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() {
 }
 
 #[test]
-fn reporters_receive_their_slice() {
-	// This test verifies that the reporters of the offence receive their slice from the slashed
-	// amount.
+fn only_first_reporter_receive_the_slice() {
+	// This test verifies that the first reporter of the offence receive their slice from the
+	// slashed amount.
 	ExtBuilder::default().build_and_execute(|| {
 		// The reporters' reward is calculated from the total exposure.
 		let initial_balance = 1125;
@@ -2706,19 +2663,16 @@ fn reporters_receive_their_slice() {
 		assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance);
 
 		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![1, 2],
-			}],
+			&[OffenceDetails { offender: (11, ()), reporters: vec![1, 2] }],
 			&[Perbill::from_percent(50)],
+			true,
 		);
 
 		// F1 * (reward_proportion * slash - 0)
 		// 50% * (10% * initial_balance / 2)
 		let reward = (initial_balance / 20) / 2;
-		let reward_each = reward / 2; // split into two pieces.
-		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward_each);
-		assert_eq!(asset::total_balance::<Test>(&2), 20 + reward_each);
+		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward);
+		assert_eq!(asset::total_balance::<Test>(&2), 20 + 0);
 	});
 }
 
@@ -2732,26 +2686,14 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 
 		assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![1],
-			}],
-			&[Perbill::from_percent(20)],
-		);
+		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(20)], true);
 
 		// F1 * (reward_proportion * slash - 0)
 		// 50% * (10% * initial_balance * 20%)
 		let reward = (initial_balance / 5) / 20;
 		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![1],
-			}],
-			&[Perbill::from_percent(50)],
-		);
+		on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(50)], true);
 
 		let prior_payout = reward;
 
@@ -2779,17 +2721,9 @@ fn invulnerables_are_not_slashed() {
 			.collect();
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				},
-				OffenceDetails {
-					offender: (21, Staking::eras_stakers(active_era(), &21)),
-					reporters: vec![],
-				},
-			],
+			&[offence_from(11, None), offence_from(21, None)],
 			&[Perbill::from_percent(50), Perbill::from_percent(20)],
+			true,
 		);
 
 		// The validator 11 hasn't been slashed, but 21 has been.
@@ -2813,13 +2747,7 @@ fn dont_slash_if_fraction_is_zero() {
 	ExtBuilder::default().build_and_execute(|| {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(0)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 		// The validator hasn't been slashed. The new era is not forced.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
@@ -2834,36 +2762,18 @@ fn only_slash_for_max_in_era() {
 	ExtBuilder::default().build_and_execute(|| {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(50)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 		// The validator has been slashed and has been force-chilled.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 500);
 		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(25)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
 		// The validator has not been slashed additionally.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 500);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(60)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(60)], true);
 
 		// The validator got slashed 10% more.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 400);
@@ -2879,25 +2789,13 @@ fn garbage_collection_after_slashing() {
 		.build_and_execute(|| {
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 2000);
 
-			on_offence_now(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(10)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 2000 - 200);
 			assert!(SlashingSpans::<Test>::get(&11).is_some());
 			assert_eq!(SpanSlash::<Test>::get(&(11, 0)).amount(), &200);
 
-			on_offence_now(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(100)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 			// validator and nominator slash in era are garbage-collected by era change,
 			// so we don't test those here.
@@ -2935,13 +2833,7 @@ fn garbage_collection_on_window_pruning() {
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(now, &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+		add_slash(&11);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000 - (nominated_value / 10));
@@ -2979,14 +2871,7 @@ fn slashing_nominators_by_span_max() {
 		let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value;
 		let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_in_era(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-			2,
-		);
+		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
 
@@ -3005,14 +2890,7 @@ fn slashing_nominators_by_span_max() {
 		assert_eq!(get_span(101).iter().collect::<Vec<_>>(), expected_spans);
 
 		// second slash: higher era, higher value, same span.
-		on_offence_in_era(
-			&[OffenceDetails {
-				offender: (21, Staking::eras_stakers(active_era(), &21)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(30)],
-			3,
-		);
+		on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(30)], 3, true);
 
 		// 11 was not further slashed, but 21 and 101 were.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
@@ -3026,14 +2904,7 @@ fn slashing_nominators_by_span_max() {
 
 		// third slash: in same era and on same validator as first, higher
 		// in-era value, but lower slash value than slash 2.
-		on_offence_in_era(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(20)],
-			2,
-		);
+		on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(20)], 2, true);
 
 		// 11 was further slashed, but 21 and 101 were not.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 800);
@@ -3060,13 +2931,7 @@ fn slashes_are_summed_across_spans() {
 
 		let get_span = |account| SlashingSpans::<Test>::get(&account).unwrap();
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (21, Staking::eras_stakers(active_era(), &21)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
 		let expected_spans = vec![
 			slashing::SlashingSpan { index: 1, start: 4, length: None },
@@ -3083,13 +2948,7 @@ fn slashes_are_summed_across_spans() {
 
 		assert_eq!(Staking::slashable_balance_of(&21), 900);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (21, Staking::eras_stakers(active_era(), &21)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+		on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
 		let expected_spans = vec![
 			slashing::SlashingSpan { index: 2, start: 5, length: None },
@@ -3115,13 +2974,10 @@ fn deferred_slashes_are_deferred() {
 
 		System::reset_events();
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+		// only 1 page of exposure, so slashes will be applied in one block.
+		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 1);
+
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 		// nominations are not removed regardless of the deferring.
 		assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3134,27 +2990,37 @@ fn deferred_slashes_are_deferred() {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-		mock::start_active_era(3);
+		assert!(matches!(
+			staking_events_since_last_call().as_slice(),
+			&[
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
+				Event::SlashComputed { offence_era: 1, slash_era: 3, page: 0, .. },
+				Event::PagedElectionProceeded { page: 0, result: Ok(2) },
+				Event::StakersElected,
+				..,
+			]
+		));
 
+		// the slashes for era 1 will start applying in era 3, to end before era 4.
+		mock::start_active_era(3);
+		// Slashes not applied yet. Will apply in the next block after era starts.
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
-
-		// at the start of era 4, slashes from era 1 are processed,
-		// after being deferred for at least 2 full eras.
-		mock::start_active_era(4);
-
+		// trigger slashing by advancing block.
+		advance_blocks(1);
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000 - (nominated_value / 10));
 
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				// era 3 elections
 				Event::PagedElectionProceeded { page: 0, result: Ok(2) },
 				Event::StakersElected,
-				..,
+				Event::EraPaid { .. },
+				// slashes applied from era 1 between era 3 and 4.
 				Event::Slashed { staker: 11, amount: 100 },
-				Event::Slashed { staker: 101, amount: 12 }
+				Event::Slashed { staker: 101, amount: 12 },
 			]
 		));
 	})
@@ -3166,25 +3032,26 @@ fn retroactive_deferred_slashes_two_eras_before() {
 		assert_eq!(BondingDuration::get(), 3);
 
 		mock::start_active_era(1);
-		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11);
-
-		mock::start_active_era(3);
 
 		assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
 
 		System::reset_events();
 		on_offence_in_era(
-			&[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }],
+			&[offence_from(11, None)],
 			&[Perbill::from_percent(10)],
-			1, // should be deferred for two full eras, and applied at the beginning of era 4.
+			1, // should be deferred for two eras, and applied at the beginning of era 3.
+			true,
 		);
 
-		mock::start_active_era(4);
+		mock::start_active_era(3);
+		// Slashes not applied yet. Will apply in the next block after era starts.
+		advance_blocks(1);
 
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
+				Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 },
 				..,
 				Event::Slashed { staker: 11, amount: 100 },
 				Event::Slashed { staker: 101, amount: 12 }
@@ -3198,9 +3065,6 @@ fn retroactive_deferred_slashes_one_before() {
 	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
 		assert_eq!(BondingDuration::get(), 3);
 
-		mock::start_active_era(1);
-		let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11);
-
 		// unbond at slash era.
 		mock::start_active_era(2);
 		assert_ok!(Staking::chill(RuntimeOrigin::signed(11)));
@@ -3209,21 +3073,23 @@ fn retroactive_deferred_slashes_one_before() {
 		mock::start_active_era(3);
 		System::reset_events();
 		on_offence_in_era(
-			&[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }],
+			&[offence_from(11, None)],
 			&[Perbill::from_percent(10)],
-			2, // should be deferred for two full eras, and applied at the beginning of era 5.
+			2, // should be deferred for two eras, and applied before the beginning of era 4.
+			true,
 		);
 
 		mock::start_active_era(4);
 
 		assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000);
-		// slash happens after the next line.
+		// slash happens at next blocks.
+		advance_blocks(1);
 
-		mock::start_active_era(5);
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 2, .. },
+				Event::OffenceReported { validator: 11, offence_era: 2, .. },
+				Event::SlashComputed { offence_era: 2, slash_era: 4, offender: 11, page: 0 },
 				..,
 				Event::Slashed { staker: 11, amount: 100 },
 				Event::Slashed { staker: 101, amount: 12 }
@@ -3249,13 +3115,7 @@ fn staker_cannot_bail_deferred_slash() {
 		let exposure = Staking::eras_stakers(active_era(), &11);
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, Staking::eras_stakers(active_era(), &11)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+		on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 		// now we chill
 		assert_ok!(Staking::chill(RuntimeOrigin::signed(101)));
@@ -3324,23 +3184,44 @@ fn remove_deferred() {
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		// deferred to start of era 4.
-		on_offence_now(
-			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(10)],
-		);
+		// deferred to start of era 3.
+		let slash_fraction_one = Perbill::from_percent(10);
+		on_offence_now(&[offence_from(11, None)], &[slash_fraction_one], true);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
 		mock::start_active_era(2);
 
-		// reported later, but deferred to start of era 4 as well.
+		// reported later, but deferred to start of era 3 as well.
 		System::reset_events();
-		on_offence_in_era(
-			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(15)],
-			1,
+		let slash_fraction_two = Perbill::from_percent(15);
+		on_offence_in_era(&[offence_from(11, None)], &[slash_fraction_two], 1, true);
+
+		assert_eq!(
+			UnappliedSlashes::<Test>::iter_prefix(&3).collect::<Vec<_>>(),
+			vec![
+				(
+					(11, slash_fraction_one, 0),
+					UnappliedSlash {
+						validator: 11,
+						own: 100,
+						others: bounded_vec![(101, 12)],
+						reporter: None,
+						payout: 5
+					}
+				),
+				(
+					(11, slash_fraction_two, 0),
+					UnappliedSlash {
+						validator: 11,
+						own: 50,
+						others: bounded_vec![(101, 7)],
+						reporter: None,
+						payout: 6
+					}
+				),
+			]
 		);
 
 		// fails if empty
@@ -3349,8 +3230,13 @@ fn remove_deferred() {
 			Error::<Test>::EmptyTargets
 		);
 
-		// cancel one of them.
-		assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0]));
+		// cancel the slash with 10%.
+		assert_ok!(Staking::cancel_deferred_slash(
+			RuntimeOrigin::root(),
+			3,
+			vec![(11, slash_fraction_one, 0)]
+		));
+		assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&3).count(), 1);
 
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
@@ -3360,23 +3246,29 @@ fn remove_deferred() {
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-		// at the start of era 4, slashes from era 1 are processed,
-		// after being deferred for at least 2 full eras.
-		mock::start_active_era(4);
+		// at the next blocks, slashes from era 1 are processed, 1 page a block,
+		// after being deferred for 2 eras.
+		advance_blocks(1);
 
 		// the first slash for 10% was cancelled, but the 15% one not.
 		assert!(matches!(
 			staking_events_since_last_call().as_slice(),
 			&[
-				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::OffenceReported { validator: 11, offence_era: 1, .. },
+				Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 },
+				Event::SlashCancelled {
+					slash_era: 3,
+					slash_key: (11, fraction, 0),
+					payout: 5
+				},
 				..,
 				Event::Slashed { staker: 11, amount: 50 },
 				Event::Slashed { staker: 101, amount: 7 }
-			]
+			] if fraction == slash_fraction_one
 		));
 
 		let slash_10 = Perbill::from_percent(10);
-		let slash_15 = Perbill::from_percent(15);
+		let slash_15 = slash_fraction_two;
 		let initial_slash = slash_10 * nominated_value;
 
 		let total_slash = slash_15 * nominated_value;
@@ -3390,67 +3282,48 @@ fn remove_deferred() {
 
 #[test]
 fn remove_multi_deferred() {
-	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
-		mock::start_active_era(1);
+	ExtBuilder::default()
+		.slash_defer_duration(2)
+		.validator_count(4)
+		.set_status(41, StakerStatus::Validator)
+		.set_status(51, StakerStatus::Validator)
+		.build_and_execute(|| {
+			mock::start_active_era(1);
 
-		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
+			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
+			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-		let exposure = Staking::eras_stakers(active_era(), &11);
-		assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
-		on_offence_now(
-			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(10)],
-		);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true);
 
-		on_offence_now(
-			&[OffenceDetails {
-				offender: (21, Staking::eras_stakers(active_era(), &21)),
-				reporters: vec![],
-			}],
-			&[Perbill::from_percent(10)],
-		);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
-		on_offence_now(
-			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(25)],
-		);
+			on_offence_now(&[offence_from(41, None)], &[Perbill::from_percent(25)], true);
 
-		on_offence_now(
-			&[OffenceDetails { offender: (42, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(25)],
-		);
+			on_offence_now(&[offence_from(51, None)], &[Perbill::from_percent(25)], true);
 
-		on_offence_now(
-			&[OffenceDetails { offender: (69, exposure.clone()), reporters: vec![] }],
-			&[Perbill::from_percent(25)],
-		);
+			// there are 5 slashes to be applied in era 3.
+			assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&3).count(), 5);
 
-		assert_eq!(UnappliedSlashes::<Test>::get(&4).len(), 5);
-
-		// fails if list is not sorted
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]),
-			Error::<Test>::NotSortedAndUnique
-		);
-		// fails if list is not unique
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]),
-			Error::<Test>::NotSortedAndUnique
-		);
-		// fails if bad index
-		assert_noop!(
-			Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]),
-			Error::<Test>::InvalidSlashIndex
-		);
-
-		assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4]));
+			// lets cancel 3 of them.
+			assert_ok!(Staking::cancel_deferred_slash(
+				RuntimeOrigin::root(),
+				3,
+				vec![
+					(11, Perbill::from_percent(10), 0),
+					(11, Perbill::from_percent(25), 0),
+					(51, Perbill::from_percent(25), 0),
+				]
+			));
 
-		let slashes = UnappliedSlashes::<Test>::get(&4);
-		assert_eq!(slashes.len(), 2);
-		assert_eq!(slashes[0].validator, 21);
-		assert_eq!(slashes[1].validator, 42);
-	})
+			let slashes = UnappliedSlashes::<Test>::iter_prefix(&3).collect::<Vec<_>>();
+			assert_eq!(slashes.len(), 2);
+			// the first item in the remaining slashes belongs to validator 41.
+			assert_eq!(slashes[0].0, (41, Perbill::from_percent(25), 0));
+			// the second and last item in the remaining slashes belongs to validator 21.
+			assert_eq!(slashes[1].0, (21, Perbill::from_percent(10), 0));
+		})
 }
 
 #[test]
@@ -3479,10 +3352,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 			assert_eq!(exposure_11.total, 1000 + 125);
 			assert_eq!(exposure_21.total, 1000 + 375);
 
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(10)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
 
 			assert_eq!(
 				staking_events_since_last_call(),
@@ -3490,17 +3360,22 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 100 },
 					Event::Slashed { staker: 101, amount: 12 },
 				]
 			);
 
+			assert!(matches!(
+				session_events().as_slice(),
+				&[.., SessionEvent::ValidatorDisabled { validator: 11 }]
+			));
+
 			// post-slash balance
 			let nominator_slash_amount_11 = 125 / 10;
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 900);
@@ -3537,23 +3412,14 @@ fn non_slashable_offence_disables_validator() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
 			// offence with no slash associated
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::zero()],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::zero()], true);
 
 			// it does NOT affect the nominator.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
 
 			// offence that slashes 25% of the bond
-			on_offence_now(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(25)],
-			);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true);
 
 			// it DOES NOT affect the nominator.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3564,23 +3430,31 @@ fn non_slashable_offence_disables_validator() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(0),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(25),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 250 },
 					Event::Slashed { staker: 101, amount: 94 }
 				]
 			);
 
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+				]
+			));
+
 			// the offence for validator 11 wasn't slashable but it is disabled
 			assert!(is_disabled(11));
 			// validator 21 gets disabled too
@@ -3598,18 +3472,11 @@ fn slashing_independent_of_disabling_validator() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
 			let now = ActiveEra::<Test>::get().unwrap().index;
 
 			// --- Disable without a slash ---
 			// offence with no slash associated
-			on_offence_in_era(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::zero()],
-				now,
-			);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::zero()], now, true);
 
 			// nomination remains untouched.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3619,18 +3486,10 @@ fn slashing_independent_of_disabling_validator() {
 
 			// --- Slash without disabling ---
 			// offence that slashes 50% of the bond (setup for next slash)
-			on_offence_in_era(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(50)],
-				now,
-			);
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(50)], now, true);
 
 			// offence that slashes 25% of the bond but does not disable
-			on_offence_in_era(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(25)],
-				now,
-			);
+			on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(25)], now, true);
 
 			// nomination remains untouched.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3645,28 +3504,39 @@ fn slashing_independent_of_disabling_validator() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(5) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(0),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 500 },
 					Event::Slashed { staker: 101, amount: 62 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(25),
-						slash_era: 1
+						offence_era: 1
 					},
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 250 },
 					Event::Slashed { staker: 101, amount: 94 }
 				]
 			);
+
+			assert_eq!(
+				session_events(),
+				vec![
+					SessionEvent::NewSession { session_index: 1 },
+					SessionEvent::NewSession { session_index: 2 },
+					SessionEvent::NewSession { session_index: 3 },
+					SessionEvent::ValidatorDisabled { validator: 11 }
+				]
+			);
 		});
 }
 
@@ -3688,25 +3558,14 @@ fn offence_threshold_doesnt_plan_new_era() {
 
 			// we have 4 validators and an offending validator threshold of 1/3,
 			// even if the third validator commits an offence a new era should not be forced
-
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &31);
-
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(50)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true);
 
 			// 11 should be disabled because the byzantine threshold is 1
 			assert!(is_disabled(11));
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-			on_offence_now(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::zero()],
-			);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::zero()], true);
 
 			// 21 should not be disabled because the number of disabled validators will be above the
 			// byzantine threshold
@@ -3714,10 +3573,7 @@ fn offence_threshold_doesnt_plan_new_era() {
 
 			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 
-			on_offence_now(
-				&[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }],
-				&[Perbill::zero()],
-			);
+			on_offence_now(&[offence_from(31, None)], &[Perbill::zero()], true);
 
 			// same for 31
 			assert!(!is_disabled(31));
@@ -3739,13 +3595,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() {
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 			assert_eq!(<Test as Config>::SessionsPerEra::get(), 3);
 
-			let exposure_11 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(ActiveEra::<Test>::get().unwrap().index, &21);
-
-			on_offence_now(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(25)],
-			);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true);
 
 			// nominations are not updated.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3759,10 +3609,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() {
 			assert!(is_disabled(21));
 
 			// validator 11 commits an offence
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(25)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true);
 
 			// nominations are not updated.
 			assert_eq!(Nominators::<Test>::get(101).unwrap().targets, vec![11, 21]);
@@ -3878,14 +3725,9 @@ fn zero_slash_keeps_nominators() {
 			mock::start_active_era(1);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
-
-			let exposure = Staking::eras_stakers(active_era(), &11);
 			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
 
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(0)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true);
 
 			assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
 			assert_eq!(asset::stakeable_balance::<Test>(&101), 2000);
@@ -4878,6 +4720,7 @@ fn bond_during_era_does_not_populate_legacy_claimed_rewards() {
 }
 
 #[test]
+#[ignore]
 fn offences_weight_calculated_correctly() {
 	ExtBuilder::default().nominate(true).build_and_execute(|| {
 		// On offence with zero offenders: 4 Reads, 1 Write
@@ -4900,7 +4743,7 @@ fn offences_weight_calculated_correctly() {
 			>,
 		> = (1..10)
 			.map(|i| OffenceDetails {
-				offender: (i, Staking::eras_stakers(active_era(), &i)),
+				offender: (i, ()),
 				reporters: vec![],
 			})
 			.collect();
@@ -4914,10 +4757,7 @@ fn offences_weight_calculated_correctly() {
 		);
 
 		// On Offence with one offenders, Applied
-		let one_offender = [OffenceDetails {
-			offender: (11, Staking::eras_stakers(active_era(), &11)),
-			reporters: vec![1],
-		}];
+		let one_offender = [offence_from(11, Some(1))];
 
 		let n = 1; // Number of offenders
 		let rw = 3 + 3 * n; // rw reads and writes
@@ -6951,13 +6791,7 @@ mod staking_interface {
 	#[test]
 	fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() {
 		ExtBuilder::default().build_and_execute(|| {
-			on_offence_now(
-				&[OffenceDetails {
-					offender: (11, Staking::eras_stakers(active_era(), &11)),
-					reporters: vec![],
-				}],
-				&[Perbill::from_percent(100)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true);
 
 			assert_eq!(Staking::bonded(&11), Some(11));
 
@@ -7241,13 +7075,7 @@ mod staking_unchecked {
 				let exposed_nominator = initial_exposure.others.first().unwrap().value;
 
 				// 11 goes offline
-				on_offence_now(
-					&[OffenceDetails {
-						offender: (11, initial_exposure.clone()),
-						reporters: vec![],
-					}],
-					&[slash_percent],
-				);
+				on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 				let slash_amount = slash_percent * exposed_stake;
 				let validator_share =
@@ -7313,13 +7141,7 @@ mod staking_unchecked {
 				let nominator_stake = Staking::ledger(101.into()).unwrap().total;
 
 				// 11 goes offline
-				on_offence_now(
-					&[OffenceDetails {
-						offender: (11, initial_exposure.clone()),
-						reporters: vec![],
-					}],
-					&[slash_percent],
-				);
+				on_offence_now(&[offence_from(11, None)], &[slash_percent], true);
 
 				// both stakes must have been decreased to 0.
 				assert_eq!(Staking::ledger(101.into()).unwrap().active, 0);
@@ -8231,39 +8053,20 @@ mod ledger_recovery {
 }
 
 mod byzantine_threshold_disabling_strategy {
-	use crate::{
-		tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy,
-	};
+	use crate::tests::{DisablingStrategy, Test, UpToLimitDisablingStrategy};
 	use sp_runtime::Perbill;
-	use sp_staking::{offence::OffenceSeverity, EraIndex};
+	use sp_staking::offence::OffenceSeverity;
 
 	// Common test data - the stash of the offending validator, the era of the offence and the
 	// active set
 	const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
 	const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
 	const MIN_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
-	const SLASH_ERA: EraIndex = 1;
 	const ACTIVE_SET: [<Test as pallet_session::Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
 	const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set
 
-	#[test]
-	fn dont_disable_for_ancient_offence() {
-		sp_io::TestExternalities::default().execute_with(|| {
-			let initially_disabled = vec![];
-			pallet_session::Validators::<Test>::put(ACTIVE_SET.to_vec());
-			ActiveEra::<Test>::put(ActiveEraInfo { index: 2, start: None });
-
-			let disabling_decision =
-				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
-					&OFFENDER_ID,
-					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
-					&initially_disabled,
-				);
-
-			assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
-		});
-	}
+	// todo(ank4n): Ensure there is a test that for older eras, the disabling strategy does not
+	// disable the validator.
 
 	#[test]
 	fn dont_disable_beyond_byzantine_threshold() {
@@ -8275,7 +8078,6 @@ mod byzantine_threshold_disabling_strategy {
 				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8293,7 +8095,6 @@ mod byzantine_threshold_disabling_strategy {
 				<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8303,41 +8104,18 @@ mod byzantine_threshold_disabling_strategy {
 }
 
 mod disabling_strategy_with_reenabling {
-	use crate::{
-		tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy,
-		UpToLimitWithReEnablingDisablingStrategy,
-	};
+	use crate::tests::{DisablingStrategy, Test, UpToLimitWithReEnablingDisablingStrategy};
 	use sp_runtime::Perbill;
-	use sp_staking::{offence::OffenceSeverity, EraIndex};
+	use sp_staking::offence::OffenceSeverity;
 
 	// Common test data - the stash of the offending validator, the era of the offence and the
 	// active set
 	const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
 	const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
 	const LOW_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
-	const SLASH_ERA: EraIndex = 1;
 	const ACTIVE_SET: [<Test as pallet_session::Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
 	const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set
 
-	#[test]
-	fn dont_disable_for_ancient_offence() {
-		sp_io::TestExternalities::default().execute_with(|| {
-			let initially_disabled = vec![];
-			pallet_session::Validators::<Test>::put(ACTIVE_SET.to_vec());
-			ActiveEra::<Test>::put(ActiveEraInfo { index: 2, start: None });
-
-			let disabling_decision =
-				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
-					&OFFENDER_ID,
-					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
-					&initially_disabled,
-				);
-
-			assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
-		});
-	}
-
 	#[test]
 	fn disable_when_below_byzantine_threshold() {
 		sp_io::TestExternalities::default().execute_with(|| {
@@ -8348,7 +8126,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8368,7 +8145,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8389,7 +8165,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					LOW_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8407,7 +8182,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8428,7 +8202,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8450,7 +8223,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8471,7 +8243,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					LOW_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8490,7 +8261,6 @@ mod disabling_strategy_with_reenabling {
 				<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
 					&OFFENDER_ID,
 					MAX_OFFENDER_SEVERITY,
-					SLASH_ERA,
 					&initially_disabled,
 				);
 
@@ -8511,19 +8281,9 @@ fn reenable_lower_offenders_mock() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31);
-
 			// offence with a low slash
-			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(10)],
-			);
-			on_offence_now(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(20)],
-			);
+			on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true);
+			on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(20)], true);
 
 			// it does NOT affect the nominator.
 			assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]);
@@ -8533,10 +8293,7 @@ fn reenable_lower_offenders_mock() {
 			assert!(is_disabled(21));
 
 			// offence with a higher slash
-			on_offence_now(
-				&[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(50)],
-			);
+			on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(50)], true);
 
 			// First offender is no longer disabled
 			assert!(!is_disabled(11));
@@ -8551,32 +8308,42 @@ fn reenable_lower_offenders_mock() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
 					Event::Slashed { staker: 11, amount: 100 },
 					Event::Slashed { staker: 101, amount: 12 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(20),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
 					Event::Slashed { staker: 21, amount: 200 },
 					Event::Slashed { staker: 101, amount: 75 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 31,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 31 },
-					Event::ValidatorReenabled { stash: 11 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 },
 					Event::Slashed { staker: 31, amount: 250 },
 				]
 			);
+
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+					SessionEvent::ValidatorDisabled { validator: 31 },
+					SessionEvent::ValidatorReenabled { validator: 11 },
+				]
+			));
 		});
 }
 
@@ -8592,33 +8359,17 @@ fn do_not_reenable_higher_offenders_mock() {
 			mock::start_active_era(1);
 			assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]);
 
-			let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11);
-			let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21);
-			let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31);
-
 			// offence with a major slash
 			on_offence_now(
-				&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(50)],
-			);
-			on_offence_now(
-				&[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(50)],
+				&[offence_from(11, None), offence_from(21, None), offence_from(31, None)],
+				&[Perbill::from_percent(50), Perbill::from_percent(50), Perbill::from_percent(10)],
+				true,
 			);
 
 			// both validators should be disabled
 			assert!(is_disabled(11));
 			assert!(is_disabled(21));
 
-			// offence with a minor slash
-			on_offence_now(
-				&[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }],
-				&[Perbill::from_percent(10)],
-			);
-
-			// First and second offenders are still disabled
-			assert!(is_disabled(11));
-			assert!(is_disabled(21));
 			// New offender is not disabled as limit is reached and his prio is lower
 			assert!(!is_disabled(31));
 
@@ -8628,30 +8379,40 @@ fn do_not_reenable_higher_offenders_mock() {
 					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 11,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 11 },
-					Event::Slashed { staker: 11, amount: 500 },
-					Event::Slashed { staker: 101, amount: 62 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 21,
 						fraction: Perbill::from_percent(50),
-						slash_era: 1
+						offence_era: 1
 					},
-					Event::ValidatorDisabled { stash: 21 },
-					Event::Slashed { staker: 21, amount: 500 },
-					Event::Slashed { staker: 101, amount: 187 },
-					Event::SlashReported {
+					Event::OffenceReported {
 						validator: 31,
 						fraction: Perbill::from_percent(10),
-						slash_era: 1
+						offence_era: 1
 					},
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 },
 					Event::Slashed { staker: 31, amount: 50 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 },
+					Event::Slashed { staker: 21, amount: 500 },
+					Event::Slashed { staker: 101, amount: 187 },
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
+					Event::Slashed { staker: 11, amount: 500 },
+					Event::Slashed { staker: 101, amount: 62 },
 				]
 			);
+
+			assert!(matches!(
+				session_events().as_slice(),
+				&[
+					..,
+					SessionEvent::ValidatorDisabled { validator: 11 },
+					SessionEvent::ValidatorDisabled { validator: 21 },
+				]
+			));
 		});
 }
 
@@ -9245,3 +9006,409 @@ mod hold_migration {
 		});
 	}
 }
+
+mod paged_slashing {
+	use super::*;
+	use crate::slashing::OffenceRecord;
+
+	#[test]
+	fn offence_processed_in_multi_block() {
+		// Ensure each page is processed only once.
+		ExtBuilder::default()
+			.has_stakers(false)
+			.slash_defer_duration(3)
+			.build_and_execute(|| {
+				let base_stake = 1000;
+
+				// Create a validator:
+				bond_validator(11, base_stake);
+				assert_eq!(Validators::<Test>::count(), 1);
+
+				// Track the total exposure of 11.
+				let mut exposure_counter = base_stake;
+
+				// Exposure page size is 64, hence it creates 4 pages of exposure.
+				let expected_page_count = 4;
+				for i in 0..200 {
+					let bond_amount = base_stake + i as Balance;
+					bond_nominator(1000 + i, bond_amount, vec![11]);
+					// with multi page reward payout, payout exposure is same as total exposure.
+					exposure_counter += bond_amount;
+				}
+
+				mock::start_active_era(1);
+
+				assert_eq!(
+					ErasStakersOverview::<Test>::get(1, 11).expect("exposure should exist"),
+					PagedExposureMetadata {
+						total: exposure_counter,
+						own: base_stake,
+						page_count: expected_page_count,
+						nominator_count: 200,
+					}
+				);
+
+				mock::start_active_era(2);
+				System::reset_events();
+
+				// report an offence for 11 in era 1.
+				on_offence_in_era(
+					&[offence_from(11, None)],
+					&[Perbill::from_percent(10)],
+					1,
+					false,
+				);
+
+				// ensure offence is queued.
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::OffenceReported {
+						validator: 11,
+						fraction: Perbill::from_percent(10),
+						offence_era: 1
+					}]
+				);
+
+				// ensure offence queue has items.
+				assert_eq!(
+					OffenceQueue::<Test>::get(1, 11).unwrap(),
+					slashing::OffenceRecord {
+						reporter: None,
+						reported_era: 2,
+						// first page to be marked for processing.
+						exposure_page: expected_page_count - 1,
+						slash_fraction: Perbill::from_percent(10),
+						prior_slash_fraction: Perbill::zero(),
+					}
+				);
+
+				// The offence era is noted in the queue.
+				assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![1]);
+
+				// ensure Processing offence is empty yet.
+				assert_eq!(ProcessingOffence::<Test>::get(), None);
+
+				// ensure no unapplied slashes for era 4 (offence_era + slash_defer_duration).
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 0);
+
+				// Checkpoint 1: advancing to next block will compute the first page of slash.
+				advance_blocks(1);
+
+				// ensure the last page of offence is processed.
+				// (offence is processed in reverse order of pages)
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::SlashComputed {
+						offence_era: 1,
+						slash_era: 4,
+						offender: 11,
+						page: expected_page_count - 1
+					},]
+				);
+
+				// offender is removed from offence queue
+				assert_eq!(OffenceQueue::<Test>::get(1, 11), None);
+
+				// offence era is removed from queue.
+				assert_eq!(OffenceQueueEras::<Test>::get(), None);
+
+				// this offence is not completely processed yet, so it should be in processing.
+				assert_eq!(
+					ProcessingOffence::<Test>::get(),
+					Some((
+						1,
+						11,
+						OffenceRecord {
+							reporter: None,
+							reported_era: 2,
+							// page 3 is processed, next page to be processed is 2.
+							exposure_page: 2,
+							slash_fraction: Perbill::from_percent(10),
+							prior_slash_fraction: Perbill::zero(),
+						}
+					))
+				);
+
+				// unapplied slashes for era 4.
+				let slashes = UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>();
+				// only one unapplied slash exists.
+				assert_eq!(slashes.len(), 1);
+				let (slash_key, unapplied_slash) = &slashes[0];
+				// this is a unique key to ensure unapplied slash is not overwritten for multiple
+				// offence by offender in the same era.
+				assert_eq!(*slash_key, (11, Perbill::from_percent(10), expected_page_count - 1));
+
+				// validator own stake is only included in the first page. Since this is page 3,
+				// only nominators are slashed.
+				assert_eq!(unapplied_slash.own, 0);
+				assert_eq!(unapplied_slash.validator, 11);
+				assert_eq!(unapplied_slash.others.len(), 200 % 64);
+
+				// Checkpoint 2: advancing to next block will compute the second page of slash.
+				advance_blocks(1);
+
+				// offence queue still empty
+				assert_eq!(OffenceQueue::<Test>::get(1, 11), None);
+				assert_eq!(OffenceQueueEras::<Test>::get(), None);
+
+				// processing offence points to next page.
+				assert_eq!(
+					ProcessingOffence::<Test>::get(),
+					Some((
+						1,
+						11,
+						OffenceRecord {
+							reporter: None,
+							reported_era: 2,
+							// page 2 is processed, next page to be processed is 1.
+							exposure_page: 1,
+							slash_fraction: Perbill::from_percent(10),
+							prior_slash_fraction: Perbill::zero(),
+						}
+					))
+				);
+
+				// there are two unapplied slashes for era 4.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 2);
+
+				// ensure the last page of offence is processed.
+				// (offence is processed in reverse order of pages)
+				assert_eq!(
+					staking_events_since_last_call().as_slice(),
+					vec![Event::SlashComputed {
+						offence_era: 1,
+						slash_era: 4,
+						offender: 11,
+						page: expected_page_count - 2
+					},]
+				);
+
+				// Checkpoint 3: advancing to two more blocks will complete the processing of the
+				// reported offence
+				advance_blocks(2);
+
+				// no processing offence.
+				assert!(ProcessingOffence::<Test>::get().is_none());
+				// total of 4 unapplied slash.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 4);
+
+				// Checkpoint 4: lets verify the application of slashes in multiple blocks.
+				// advance to era 4.
+				mock::start_active_era(4);
+				// slashes are not applied just yet. From next blocks, they will be applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 4);
+
+				// advance to next block.
+				advance_blocks(1);
+				// 1 slash is applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 3);
+
+				// advance two blocks.
+				advance_blocks(2);
+				// 2 more slashes are applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 1);
+
+				// advance one more block.
+				advance_blocks(1);
+				// all slashes are applied.
+				assert_eq!(UnappliedSlashes::<Test>::iter_prefix(&4).collect::<Vec<_>>().len(), 0);
+
+				// ensure all stakers are slashed correctly.
+				assert_eq!(asset::staked::<Test>(&11), 1000 - 100);
+
+				for i in 0..200 {
+					let original_stake = 1000 + i as Balance;
+					let expected_slash = Perbill::from_percent(10) * original_stake;
+					assert_eq!(asset::staked::<Test>(&(1000 + i)), original_stake - expected_slash);
+				}
+			})
+	}
+
+	#[test]
+	fn offence_discarded_correctly() {
+		ExtBuilder::default().slash_defer_duration(3).build_and_execute(|| {
+			start_active_era(2);
+
+			// Scenario 1: 11 commits an offence in era 2.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, false);
+
+			// offence is queued, not processed yet.
+			let queued_offence_one = OffenceQueue::<Test>::get(2, 11).unwrap();
+			assert_eq!(queued_offence_one.slash_fraction, Perbill::from_percent(10));
+			assert_eq!(queued_offence_one.prior_slash_fraction, Perbill::zero());
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// Scenario 1A: 11 commits a second offence in era 2 with **lower** slash fraction than
+			// the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(5)], 2, false);
+
+			// the second offence is discarded. No change in the queue.
+			assert_eq!(OffenceQueue::<Test>::get(2, 11).unwrap(), queued_offence_one);
+
+			// Scenario 1B: 11 commits a second offence in era 2 with **higher** slash fraction than
+			// the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(15)], 2, false);
+
+			// the second offence overwrites the first offence.
+			let overwritten_offence = OffenceQueue::<Test>::get(2, 11).unwrap();
+			assert!(overwritten_offence.slash_fraction > queued_offence_one.slash_fraction);
+			assert_eq!(overwritten_offence.slash_fraction, Perbill::from_percent(15));
+			assert_eq!(overwritten_offence.prior_slash_fraction, Perbill::zero());
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// Scenario 2: 11 commits another offence in era 2, but after the previous offence is
+			// processed.
+			advance_blocks(1);
+			assert!(OffenceQueue::<Test>::get(2, 11).is_none());
+			assert!(OffenceQueueEras::<Test>::get().is_none());
+			// unapplied slash is created for the offence.
+			assert!(UnappliedSlashes::<Test>::contains_key(
+				2 + 3,
+				(11, Perbill::from_percent(15), 0)
+			));
+
+			// Scenario 2A: offence has **lower** slash fraction than the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(14)], 2, false);
+			// offence is discarded.
+			assert!(OffenceQueue::<Test>::get(2, 11).is_none());
+			assert!(OffenceQueueEras::<Test>::get().is_none());
+
+			// Scenario 2B: offence has **higher** slash fraction than the previous offence.
+			on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(16)], 2, false);
+			// process offence
+			advance_blocks(1);
+			// there are now two slash records for 11, for era 5, with the newer one only slashing
+			// the diff between slash fractions of 16 and 15.
+			let slash_one =
+				UnappliedSlashes::<Test>::get(2 + 3, (11, Perbill::from_percent(15), 0)).unwrap();
+			let slash_two =
+				UnappliedSlashes::<Test>::get(2 + 3, (11, Perbill::from_percent(16), 0)).unwrap();
+			assert!(slash_one.own > slash_two.own);
+		});
+	}
+
+	#[test]
+	fn offence_eras_queued_correctly() {
+		ExtBuilder::default().build_and_execute(|| {
+			// 11 and 21 are validators.
+			assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator);
+			assert_eq!(Staking::status(&21).unwrap(), StakerStatus::Validator);
+
+			start_active_era(2);
+
+			// 11 and 21 commits offence in era 2.
+			on_offence_in_era(
+				&[offence_from(11, None), offence_from(21, None)],
+				&[Perbill::from_percent(10), Perbill::from_percent(20)],
+				2,
+				false,
+			);
+
+			// 11 and 21 commits offence in era 1 but reported after the era 2 offence.
+			on_offence_in_era(
+				&[offence_from(11, None), offence_from(21, None)],
+				&[Perbill::from_percent(10), Perbill::from_percent(20)],
+				1,
+				false,
+			);
+
+			// queued offence eras are sorted.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![1, 2]);
+
+			// next two blocks, the offence in era 1 is processed.
+			advance_blocks(2);
+
+			// only era 2 is left in the queue.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// next block, the offence in era 2 is processed.
+			advance_blocks(1);
+
+			// era still exist in the queue.
+			assert_eq!(OffenceQueueEras::<Test>::get().unwrap(), vec![2]);
+
+			// next block, the era 2 is processed.
+			advance_blocks(1);
+
+			// queue is empty.
+			assert_eq!(OffenceQueueEras::<Test>::get(), None);
+		});
+	}
+	#[test]
+	fn non_deferred_slash_applied_instantly() {
+		ExtBuilder::default().build_and_execute(|| {
+			mock::start_active_era(2);
+			let validator_stake = asset::staked::<Test>(&11);
+			let slash_fraction = Perbill::from_percent(10);
+			let expected_slash = slash_fraction * validator_stake;
+			System::reset_events();
+
+			// report an offence for 11 in era 1.
+			on_offence_in_era(&[offence_from(11, None)], &[slash_fraction], 1, false);
+
+			// ensure offence is queued.
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![Event::OffenceReported {
+					validator: 11,
+					fraction: Perbill::from_percent(10),
+					offence_era: 1
+				}]
+			);
+
+			// process offence
+			advance_blocks(1);
+
+			// ensure slash is computed and applied.
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![
+					Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 },
+					Event::Slashed { staker: 11, amount: expected_slash },
+					// this is the nominator of 11.
+					Event::Slashed { staker: 101, amount: 12 },
+				]
+			);
+
+			// ensure validator is slashed.
+			assert_eq!(asset::staked::<Test>(&11), validator_stake - expected_slash);
+		});
+	}
+
+	#[test]
+	fn validator_with_no_exposure_slashed() {
+		ExtBuilder::default().build_and_execute(|| {
+			let validator_stake = asset::staked::<Test>(&11);
+			let slash_fraction = Perbill::from_percent(10);
+			let expected_slash = slash_fraction * validator_stake;
+
+			// only 101 nominates 11, lets remove them.
+			assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![21]));
+
+			start_active_era(2);
+			// ensure validator has no exposure.
+			assert_eq!(ErasStakersOverview::<Test>::get(2, 11).unwrap().page_count, 0,);
+
+			// clear events
+			System::reset_events();
+
+			// report an offence for 11.
+			on_offence_now(&[offence_from(11, None)], &[slash_fraction], true);
+
+			// ensure validator is slashed.
+			assert_eq!(asset::staked::<Test>(&11), validator_stake - expected_slash);
+			assert_eq!(
+				staking_events_since_last_call().as_slice(),
+				vec![
+					Event::OffenceReported {
+						offence_era: 2,
+						validator: 11,
+						fraction: slash_fraction
+					},
+					Event::SlashComputed { offence_era: 2, slash_era: 2, offender: 11, page: 0 },
+					Event::Slashed { staker: 11, amount: expected_slash },
+				]
+			);
+		});
+	}
+}
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 92fe0e176a2e6cd6383ac23fd7a48d41135887f6..36b7be744986634102e94b19e80678611a4a6e28 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -84,6 +84,7 @@ pub trait WeightInfo {
 	fn set_min_commission() -> Weight;
 	fn restore_ledger() -> Weight;
 	fn migrate_currency() -> Weight;
+	fn apply_slash() -> Weight;
 }
 
 /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
@@ -815,6 +816,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 			.saturating_add(T::DbWeight::get().reads(6_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
 
 // For backwards compatibility and tests.
@@ -1545,4 +1550,8 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(6_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
+	fn apply_slash() -> Weight {
+		// TODO CI-FAIL: run CI bench bot
+		Weight::zero()
+	}
 }
diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs
index 281012ffb4c956ed31913d45332e0f7b7ce742d9..0f1897872cc1f27a2c9275f4879e8f9e1823c455 100644
--- a/substrate/frame/treasury/src/lib.rs
+++ b/substrate/frame/treasury/src/lib.rs
@@ -324,7 +324,7 @@ pub mod pallet {
 
 	/// The count of spends that have been made.
 	#[pallet::storage]
-	pub(crate) type SpendCount<T, I = ()> = StorageValue<_, SpendIndex, ValueQuery>;
+	pub type SpendCount<T, I = ()> = StorageValue<_, SpendIndex, ValueQuery>;
 
 	/// Spends that have been approved and being processed.
 	// Hasher: Twox safe since `SpendIndex` is an internal count based index.
@@ -345,7 +345,7 @@ pub mod pallet {
 
 	/// The blocknumber for the last triggered spend period.
 	#[pallet::storage]
-	pub(crate) type LastSpendPeriod<T, I = ()> = StorageValue<_, BlockNumberFor<T, I>, OptionQuery>;
+	pub type LastSpendPeriod<T, I = ()> = StorageValue<_, BlockNumberFor<T, I>, OptionQuery>;
 
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs
index 84f122c08bb7b4c10ee177d78dcfb5e69501e201..01548418a04993b5667210f4d76566e1a4e3aee2 100644
--- a/substrate/frame/uniques/src/lib.rs
+++ b/substrate/frame/uniques/src/lib.rs
@@ -182,7 +182,7 @@ pub mod pallet {
 	#[pallet::storage]
 	/// The items held by any given account; set out this way so that items owned by a single
 	/// account can be enumerated.
-	pub(super) type Account<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Account<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::AccountId>, // owner
@@ -197,7 +197,7 @@ pub mod pallet {
 	#[pallet::storage_prefix = "ClassAccount"]
 	/// The collections owned by any given account; set out this way so that collections owned by
 	/// a single account can be enumerated.
-	pub(super) type CollectionAccount<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+	pub type CollectionAccount<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
 		_,
 		Blake2_128Concat,
 		T::AccountId,
@@ -246,7 +246,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Attributes of a collection.
-	pub(super) type Attribute<T: Config<I>, I: 'static = ()> = StorageNMap<
+	pub type Attribute<T: Config<I>, I: 'static = ()> = StorageNMap<
 		_,
 		(
 			NMapKey<Blake2_128Concat, T::CollectionId>,
@@ -271,7 +271,7 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Keeps track of the number of items a collection might have.
-	pub(super) type CollectionMaxSupply<T: Config<I>, I: 'static = ()> =
+	pub type CollectionMaxSupply<T: Config<I>, I: 'static = ()> =
 		StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>;
 
 	#[pallet::event]
diff --git a/substrate/frame/uniques/src/types.rs b/substrate/frame/uniques/src/types.rs
index e2e170c72f21ae65912147546f789db74b60550b..6451e3800da8d8866a01c970aa4f7ec1d733f745 100644
--- a/substrate/frame/uniques/src/types.rs
+++ b/substrate/frame/uniques/src/types.rs
@@ -25,16 +25,15 @@ use frame_support::{
 use scale_info::TypeInfo;
 
 /// A type alias for handling balance deposits.
-pub(super) type DepositBalanceOf<T, I = ()> =
+pub type DepositBalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
 /// A type alias representing the details of a collection.
-pub(super) type CollectionDetailsFor<T, I> =
+pub type CollectionDetailsFor<T, I> =
 	CollectionDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
 /// A type alias for the details of a single item.
-pub(super) type ItemDetailsFor<T, I> =
-	ItemDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
+pub type ItemDetailsFor<T, I> = ItemDetails<<T as SystemConfig>::AccountId, DepositBalanceOf<T, I>>;
 /// A type alias to represent the price of an item.
-pub(super) type ItemPrice<T, I = ()> =
+pub type ItemPrice<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
 
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs
index 15f8d397f81c9c345348a16bca290d6ab86837cc..9e3e30106f34126f0a61df7854f5358732ff49cf 100644
--- a/substrate/frame/vesting/src/lib.rs
+++ b/substrate/frame/vesting/src/lib.rs
@@ -96,7 +96,7 @@ const VESTING_ID: LockIdentifier = *b"vesting ";
 // A value placed in storage that represents the current version of the Vesting storage.
 // This value is used by `on_runtime_upgrade` to determine whether we run storage migration logic.
 #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
-enum Releases {
+pub enum Releases {
 	V0,
 	V1,
 }
@@ -179,7 +179,28 @@ pub mod pallet {
 		/// the unvested amount.
 		type UnvestedFundsAllowedWithdrawReasons: Get<WithdrawReasons>;
 
-		/// Provider for the block number.
+		/// Query the current block number.
+		///
+		/// Must return monotonically increasing values when called from consecutive blocks.
+		/// Can be configured to return either:
+		/// - the local block number of the runtime via `frame_system::Pallet`
+		/// - a remote block number, eg from the relay chain through `RelaychainDataProvider`
+		/// - an arbitrary value through a custom implementation of the trait
+		///
+		/// There is currently no migration provided to "hot-swap" block number providers and it may
+		/// result in undefined behavior when doing so. Parachains are therefore best off setting
+		/// this to their local block number provider if they have the pallet already deployed.
+		///
+		/// Suggested values:
+		/// - Solo- and Relay-chains: `frame_system::Pallet`
+		/// - Parachains that may produce blocks sparingly or only when needed (on-demand):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: `RelaychainDataProvider`
+		/// - Parachains with a reliably block production rate (PLO or bulk-coretime):
+		///   - already have the pallet deployed: `frame_system::Pallet`
+		///   - are freshly deploying this pallet: no strong recommendation. Both local and remote
+		///     providers can be used. Relay provider can be a bit better in cases where the
+		///     parachain is lagging its block production to avoid clock skew.
 		type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>;
 
 		/// Maximum number of vesting schedules an account may have at a given moment.
@@ -214,7 +235,7 @@ pub mod pallet {
 	///
 	/// New networks start with latest version, as determined by the genesis build.
 	#[pallet::storage]
-	pub(crate) type StorageVersion<T: Config> = StorageValue<_, Releases, ValueQuery>;
+	pub type StorageVersion<T: Config> = StorageValue<_, Releases, ValueQuery>;
 
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
index 1e9155f59a57a8c9f5f34ef946fd3b39113e7a74..b71e91d422d51b8a37276e08d1312fc1e9b10f81 100644
--- a/templates/parachain/runtime/src/configs/mod.rs
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -274,6 +274,7 @@ impl pallet_session::Config for Runtime {
 	// Essentially just Aura, but let's be pedantic.
 	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
+	type DisablingStrategy = ();
 	type WeightInfo = ();
 }
 
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index 8cd5cf0c838e6987baa662f21f8c36666360643f..9e0a75d40844a49a80796f05027c0e3995b3f852 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -136,6 +136,8 @@ std = [
 	"pallet-session?/std",
 	"pallet-skip-feeless-payment?/std",
 	"pallet-society?/std",
+	"pallet-staking-ah-client?/std",
+	"pallet-staking-rc-client?/std",
 	"pallet-staking-reward-fn?/std",
 	"pallet-staking-runtime-api?/std",
 	"pallet-staking?/std",
@@ -324,6 +326,8 @@ runtime-benchmarks = [
 	"pallet-session-benchmarking?/runtime-benchmarks",
 	"pallet-skip-feeless-payment?/runtime-benchmarks",
 	"pallet-society?/runtime-benchmarks",
+	"pallet-staking-ah-client?/runtime-benchmarks",
+	"pallet-staking-rc-client?/runtime-benchmarks",
 	"pallet-staking?/runtime-benchmarks",
 	"pallet-state-trie-migration?/runtime-benchmarks",
 	"pallet-sudo?/runtime-benchmarks",
@@ -464,6 +468,8 @@ try-runtime = [
 	"pallet-session?/try-runtime",
 	"pallet-skip-feeless-payment?/try-runtime",
 	"pallet-society?/try-runtime",
+	"pallet-staking-ah-client?/try-runtime",
+	"pallet-staking-rc-client?/try-runtime",
 	"pallet-staking?/try-runtime",
 	"pallet-state-trie-migration?/try-runtime",
 	"pallet-statement?/try-runtime",
@@ -552,7 +558,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-assets-holder", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-assets-holder", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-ah-client", "pallet-staking-rc-client", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -1289,6 +1295,16 @@ default-features = false
 optional = true
 path = "../substrate/frame/staking"
 
+[dependencies.pallet-staking-ah-client]
+default-features = false
+optional = true
+path = "../substrate/frame/staking/ah-client"
+
+[dependencies.pallet-staking-rc-client]
+default-features = false
+optional = true
+path = "../substrate/frame/staking/rc-client"
+
 [dependencies.pallet-staking-reward-curve]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 89cd300b418f64d41c75a8919a41f018b63749c4..19f80aac4a451d4d36cd0c58e7c91870fb80e625 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -657,6 +657,16 @@ pub use pallet_society;
 #[cfg(feature = "pallet-staking")]
 pub use pallet_staking;
 
+/// Pallet handling the communication with staking-rc-client. It's role is to glue the staking
+/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way.
+#[cfg(feature = "pallet-staking-ah-client")]
+pub use pallet_staking_ah_client;
+
+/// Pallet handling the communication with staking-ah-client. It's role is to glue the staking
+/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way.
+#[cfg(feature = "pallet-staking-rc-client")]
+pub use pallet_staking_rc_client;
+
 /// Reward Curve for FRAME staking pallet.
 #[cfg(feature = "pallet-staking-reward-curve")]
 pub use pallet_staking_reward_curve;